| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from unittest import TestCase
from lib import unidades
from lib.meos import MEoS
class C1Oleate(MEoS):
"""Multiparameter equation of state for methyl oleate"""
name = "methyl oleate"
CASNumber = "112-62-9"
formula = "C19H36O2"
synonym = ""
_refPropName = "MOLEATE"
_coolPropName = "MethylOleate"
rhoc = unidades.Density(241.000222029)
Tc = unidades.Temperature(782.0)
Pc = unidades.Pressure(1246.0, "kPa")
M = 296.48794 # g/mol
Tt = unidades.Temperature(253.47)
Tb = unidades.Temperature(627.18)
f_acent = 0.91
momentoDipolar = unidades.DipoleMoment(1.63, "Debye")
# id = 919
f = 8.314472
CP1 = {"an": [90.2385/f], "pow": [0.146118],
"ao_exp": [234.797/f, 335.768/f, 431.66/f],
"exp": [613.529, 1405.31, 2867.76]}
huber = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for methyl oleate of Huber "
"et al. (2009).",
"__doi__": {"autor": "Huber, M.L., Lemmon, E.W., Kazakov, A., Ott, "
"L.S., Bruno, T.J.",
"title": "Model for the Thermodynamic Properties of a "
"Biodiesel Fuel",
"ref": "Energy Fuels, 23 (7) (2009) 3790–3797",
"doi": "10.1021/ef900159g"},
"R": 8.314472,
"cp": CP1,
"ref": "NBP",
"Tmin": Tt, "Tmax": 1000.0, "Pmax": 50000.0, "rhomax": 3.05,
"nr1": [0.4596121e-1, 2.2954, -3.554366, -0.2291674, 0.6854534e-1],
"d1": [4, 1, 1, 2, 3],
"t1": [1, 0.34, 1.14, 1.4, 0.6],
"nr2": [-1.535778, -0.7334697, 1.712700, -1.471394, -0.1724678e-1],
"d2": [1, 3, 2, 2, 7],
"t2": [3.3, 4.1, 1.9, 3.8, 1.3],
"c2": [2, 2, 1, 2, 1],
"gamma2": [1]*5,
"nr3": [0.2115470e1, -0.7555374, -0.4134269],
"d3": [1, 1, 3],
"t3": [3.4, 3.8, 4.0],
"alfa3": [1.1, 1.6, 1.1],
"beta3": [0.9, 0.65, 0.75],
"gamma3": [1.14, 0.65, 0.77],
"epsilon3": [0.79, 0.9, 0.76],
"nr4": []}
eq = huber,
_vapor_Pressure = {
"eq": 3,
"n": [-0.13900e2, 0.16246e2, -0.15568e2, -0.73568e1, -0.48739e1],
"t": [1.0, 1.5, 1.93, 4.2, 8.0]}
_liquid_Density = {
"eq": 1,
"n": [-0.19920e2, 0.12230e3, -0.23582e3, 0.21009e3, -0.73435e2],
"t": [0.461, 0.6, 0.75, 0.91, 1.05]}
_vapor_Density = {
"eq": 2,
"n": [-13.426, 1.8069e2, -1.1108e3, 1.3265e3, -4.6421e2, -2.1070e2],
"t": [0.667, 1.71, 2.2, 2.46, 3.0, 9.7]}
thermo0 = {"__name__": "Perkins (2010)",
"__doi__": {
"autor": "Perkins, R.A., Huber, M.L.",
"title": "Measurement and Correlation of the Thermal "
"Conductivities of Biodiesel Constituent Fluids: "
"Methyl Oleate and Methyl Linoleate",
"ref": "Energy Fuels 25(5) (2011) 2383-2388",
"doi": "10.1021/ef200417x"},
"eq": 1,
"Toref": 782.0, "koref": 1,
"no": [-2.7125e-4, 2.59365e-3, 0.0350241, -9.02273e-3],
"to": [0, 1, 2, 3],
"Tref_res": 782.0, "rhoref_res": 241, "kref_res": 1.,
"nr": [-0.0410106, 0.0606657, 0.0328443, -0.0498407,
-0.00418506, 0.0121752],
"tr": [0, 1, 0, 1, 0, 1],
"dr": [1, 1, 2, 2, 3, 3],
"critical": 3,
"gnu": 0.63, "gamma": 1.239, "R0": 1.03,
"Xio": 0.194e-9, "gam0": 0.0496, "qd": 8.75e-10, "Tcref": 1173}
_thermal = thermo0,
class Test(TestCase):
def test_Perkins(self):
# Critical enhancement can differ because the viscosity correlation
# in the paper is not implemented in pychemqt
# Table 3, Pag 2386
st = C1Oleate(T=450, P=1e2)
self.assertEqual(round(st.rho, 8), 0.00792666)
self.assertEqual(round(st.k, 7), 0.0111019)
st = C1Oleate(T=450, P=1e6)
self.assertEqual(round(st.rho, 3), 764.716)
# self.assertEqual(round(st.k, 6), 0.123794)
st = C1Oleate(T=450, P=2e7)
self.assertEqual(round(st.rho, 3), 787.080)
# self.assertEqual(round(st.k, 6), 0.133856)
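# A minimal usage sketch (mirrors the checks in the Test class above; property
# names as used there, units assumed to be kg/m3 and W/m/K):
#   st = C1Oleate(T=450, P=1e6)
#   st.rho, st.k    # density and thermal conductivity of the liquid state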
| jjgomera/pychemqt | lib/mEoS/C1Oleate.py | Python | gpl-3.0 | 5,129 |
# PyeNalysis
__author__ = "Edwin Dalmaijer"
import copy
import numpy
from scipy.interpolate import interp1d
# DEBUG #
#from matplotlib import pyplot
# # # # #
def interpolate_blink(signal, mode='auto', velthresh=5, maxdur=500, margin=10, invalid=-1, edfonly=False):
"""Returns signal with interpolated results, based on a cubic or linear
interpolation of all blinks detected in the signal; based on:
https://github.com/smathot/exparser/blob/master/exparser/TraceKit.py
arguments
signal -- a vector (i.e. a NumPy array) containing a single
trace of your signal; alternatively a trial gaze data
dict as is returned by edfreader can be passed; in this
case the blink ending events will be used to find blinks
before the pupil size velocity algorithm will be used
(NOTE: this means both will be used successively!)
keyword arguments
mode -- string indicating what kind of interpolation to use:
'linear' for a linear interpolation
'cubic' for a cubic interpolation
'auto' for a cubic interpolation if possible (i.e.
when more than four data points are available)
and linear when this is not the case
(default = 'auto')
velthresh -- pupil size change velocity threshold in arbitrary
units per sample (default = 5)
maxdur -- maximal duration of the blink in samples
(default = 500)
margin -- margin (in samples) to compensate for blink duration
underestimation; the blink is extended from the detected
start minus margin to the detected end plus margin
(default = 10)
invalid -- a single value coding for invalid data, e.g. -1 or 0.0
(default = -1)
edfonly -- Boolean indicating whether blinks should ONLY be
detected using the EDF logs and NOT algorithmically
(default = False)
returns
signal -- a NumPy array containing the interpolated signal
"""
# # # # #
# input errors
# wrong interpolation method
if mode not in ['auto','linear','cubic']:
raise Exception("Error in pyenalysis.interpolate_missing: mode '%s' is not supported, please use one of the following: 'auto','linear','cubic'" % mode)
# wrong signal dimension
if type(signal) != dict:
if signal.ndim != 1:
raise Exception("Error in pyenalysis.interpolate_missing: input is not a single signal trace, but has %d dimensions; please provide a 1-dimension array" % signal.ndim)
# # # # #
# find blinks
# empty lists, to store blink starts and endings
starts = []
ends = []
# edfreader data
if type(signal) == dict:
# loop through blinks
for st, et, dur in signal['events']['Eblk']: # Eblk - list of lists, each containing [starttime, endtime, duration]
# edf time to sample number
st = numpy.where(signal['edftime']==st)[0]
et = numpy.where(signal['edftime']==et)[0]
# if the starting or ending time did not appear in the trial,
# correct the blink starting or ending point to the first or
# last sample, respectively
if len(st) == 0:
st = 0
else:
st = st[0]
if len(et) == 0:
et = len(signal['edftime'])
else:
et = et[0]
# compensate for underestimation of blink duration
if st-margin >= 0:
st -= margin
if et+margin < len(signal['size']):
et += margin
# do not accept blinks that exceed the maximal blink duration
if et-st <= maxdur:
# append start time and ending time
starts.append(st)
ends.append(et)
# extract pupil size data from signal
signal = signal['size']
if not edfonly:
# signal in NumPy array
# create a velocity profile of the signal
vprof = signal[1:]-signal[:-1]
# start detection
ifrom = 0
while True:
# blink onset is detected when pupil size change velocity exceeds
# threshold
l = numpy.where(vprof[ifrom:] < -velthresh)[0]
# break when no blink start is detected
if len(l) == 0:
break
# blink start index
istart = l[0]+ifrom
if ifrom == istart:
break
# reversal (opening of the eye) is detected when pupil size
# starts to increase with a super-threshold velocity
l = numpy.where(vprof[istart:] > velthresh)[0]
# if no reversal is detected, start detection process at istart
# next run
if len(l) == 0:
ifrom = istart
# reloop
continue
# index number of somewhat halfway blink process
imid = l[0] + istart
# a blink ending is detected when pupil size increase velocity
# falls back to zero
l = numpy.where(vprof[imid:] < 0)[0]
# if no ending is detected, start detection process from imid
# next run
if len(l) == 0:
ifrom = imid
# reloop
continue
# blink end index
iend = l[0]+imid
# start detection process from current blink ending next run
ifrom = iend
# compensate for underestimation of blink duration
if istart-margin >= 0:
istart -= margin
if iend+margin < len(signal):
iend += margin
# do not accept blinks that exceed the maximal blink duration
if iend-istart > maxdur:
# reloop
continue
# if all is well, we append start and ending to their respective
# lists
starts.append(istart)
ends.append(iend)
# # DEBUG #
# pyplot.figure()
# pyplot.title("" % ())
# pyplot.plot(signal,'ko')
# pyplot.plot(vprof,'b')
# # # # # #
# # # # #
# interpolate
# loop through all starting and ending positions
for i in range(len(starts)):
# empty list to store data points for interpolation
pl = []
# duration in samples
duration = ends[i]-starts[i]
# starting point
if starts[i] - duration >= 0:
pl.extend([starts[i]-duration])
# central points (data between these points will be replaced)
pl.extend([starts[i],ends[i]])
# ending point
if ends[i] + duration < len(signal):
pl.extend([ends[i]+duration])
# choose interpolation type
if mode == 'auto':
# if our range is wide enough, we can interpolate cubically
if len(pl) >= 4:
kind = 'cubic'
# if not, we use a linear interpolation
else:
kind = 'linear'
else:
kind = mode[:]
# select values for interpolation function
x = numpy.array(pl)
y = signal[x]
# replace any invalid values with trial average
y[y==invalid] = numpy.mean(signal[signal!=invalid])
# create interpolation function
intfunc = interp1d(x,y,kind=kind)
# do interpolation
xint = numpy.arange(starts[i],ends[i])
yint = intfunc(xint)
# insert interpolated values into signal
signal[xint] = yint
# # DEBUG #
# y = numpy.zeros(len(pl)) + max(signal)
# pyplot.plot(pl,y,'ro')
# pyplot.plot(signal,'r')
# # # # # #
return signal
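# A minimal usage sketch (the trace below is hypothetical; in practice it is a
# 1-dimensional pupil-size trace from an eye tracker):
#   pupil = numpy.array(...)
#   pupil = interpolate_blink(pupil, mode='auto', velthresh=5, maxdur=500, margin=10)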
def interpolate_missing(signal, mode='auto', mindur=5, margin=10, invalid=-1):
"""Returns signal with interpolated results, based on a cubic or linear
interpolation of the invalid data in the signal
arguments
signal -- a vector (i.e. a NumPy array) containing a single
trace of your signal
keyword arguments
mode -- string indicating what kind of interpolation to use:
'linear' for a linear interpolation
'cubic' for a cubic interpolation
'auto' for a cubic interpolation if possible (i.e.
when more than four data points are available)
and linear when this is not the case
(default = 'auto')
mindur -- minimal amount of consecutive samples to interpolate
cubically; otherwise a linear interpolation is used;
this is to prevent weird results in the interpolation
of very short strings of missing data (default = 5)
margin -- margin (in samples) to compensate for missing duration
underestimation; the missing part is extended from the detected
start minus margin to the detected end plus margin; this helps
in reducing errors in blink interpolation that has not
been done by interpolate_blink (default = 10)
invalid -- a single value coding for invalid data, e.g. -1 or 0.0
(default = -1)
returns
signal -- a NumPy array containing the interpolated signal
"""
# # # # #
# input errors
# wrong interpolation method
if mode not in ['auto','linear','cubic']:
raise Exception("Error in pyenalysis.interpolate_missing: mode '%s' is not supported, please use one of the following: 'auto','linear','cubic'" % mode)
# wrong signal dimension
if signal.ndim != 1:
raise Exception("Error in pyenalysis.interpolate_missing: input is not a single signal trace, but has %d dimensions; please provide a 1-dimension array" % signal.ndim)
# # # # #
# find successive strings of missing data
# empty lists for starting and ending indexes
starts = []
ends = []
# check if beginning sample is missing, and add to starting indexes if
# needed (algorithm does not pick up changes before the start or after
# the end of the signal)
if signal[0] == invalid:
starts.append(0)
si = 1
else:
si = 0
# find invalid data
inval = signal == invalid
# find the transitions between valid and missing data
# (by differencing the boolean mask: every sample where the data switches
# between valid and missing shows up as a change)
diff = numpy.diff(inval)
# find out what the index numbers of the changes are
# (i.e. every position where the difference is True)
changes = numpy.where(diff==True)[0]
# loop through changes, finding start and ending index numbers for
# strings of successive missing samples
for i in range(si,len(changes),2):
ns = changes[i]-margin
if ns < 0:
ns = 0
starts.append(ns)
for i in range(1-si,len(changes),2):
ne = changes[i]+1+margin
if ne >= len(signal):
ne = len(signal)-1
ends.append(ne)
# if the signal ended on an invalid sample, add the ending index number
if signal[-1] == invalid:
ends.append(len(signal)-1)
# # # # #
# interpolate
# correct start and end point if these are invalid, by replacing them
# with the trial average
if signal[0] == invalid:
signal[0] = numpy.mean(signal[signal != invalid])
if signal[-1] == invalid:
signal[-1] = numpy.mean(signal[signal != invalid])
# loop through all starting and ending positions
for i in range(len(starts)):
# empty list to store data points for interpolation
pl = []
# duration in samples
duration = ends[i]-starts[i]
# starting point
if starts[i] - duration >= 0 and signal[starts[i]-duration] != invalid:
pl.extend([starts[i]-duration])
# central points (data between these points will be replaced)
pl.extend([starts[i],ends[i]])
# ending point
if ends[i] + duration < len(signal) and signal[ends[i]+duration] != invalid:
pl.extend([ends[i]+duration])
# if the duration is too low, use linear interpolation
if duration < mindur:
kind = 'linear'
# if the duration is long enough, choose interpolation type
else:
if mode == 'auto':
# if our range is wide enough, we can interpolate cubically
if len(pl) >= 4:
kind = 'cubic'
# if not, we use a linear interpolation
else:
kind = 'linear'
else:
kind = mode[:]
# create interpolation function
x = numpy.array(pl)
y = signal[x]
intfunc = interp1d(x,y,kind=kind)
# do interpolation
xint = numpy.arange(starts[i],ends[i])
yint = intfunc(xint)
# insert interpolated values into signal
signal[xint] = yint
return signal
def remove_outliers(signal, maxdev=2.5, invalid=-1, interpolate=True, mode='auto', allowp=0.1):
"""Replaces every outlier with a missing value, then interpolates
missing values using pyenalysis.interpolate_missing
arguments
signal -- a vector (i.e. a NumPy array) containing a single
trace of your signal
keyword arguments
maxdev -- maximal distance between a single sample and the
signal average in standard deviations (default = 2.5)
invalid -- a single value coding for invalid data, e.g. -1 or 0.0;
outliers will be replaced by this value (default = -1)
interpolate -- Boolean indicating whether outliers should be
interpolated (True) or replaced by missing
values (False) (default = True)
mode -- string indicating what kind of interpolation to use:
'linear' for a linear interpolation
'cubic' for a cubic interpolation
'auto' for a cubic interpolation is possible (i.e.
when more than four data points are available)
and linear when this is not the case
(default = 'auto')
allowp -- if the standard deviation is below this proportion of
the mean, outliers will not be removed; this is to
prevent erroneous removal of outliers in a very steady
signal (default = 0.1)
returns
signal -- signal with outliers replaced by missing or
interpolated (depending on interpolate keyword
argument)
"""
# # # # #
# input errors
# wrong interpolation method
if mode not in ['auto','linear','cubic']:
raise Exception("Error in pyenalysis.interpolate_missing: mode '%s' is not supported, please use one of the following: 'auto','linear','cubic'" % mode)
# wrong signal dimension
if signal.ndim != 1:
raise Exception("Error in pyenalysis.interpolate_missing: input is not a single signal trace, but has %d dimensions; please provide a 1-dimension array" % signal.ndim)
# # # # #
# outlier removal
# calculate signal average and standard deviation
mean = numpy.mean(signal)
sd = numpy.std(signal)
# stop if SD is too low
if sd < mean*allowp:
return signal
# calculate bounds
lower = mean - maxdev*sd
upper = mean + maxdev*sd
# find outliers
outlier = (signal > upper) | (signal < lower)
# replace outliers by invalid code
signal[outlier] = invalid
# interpolate
if interpolate:
signal = interpolate_missing(signal, mode=mode, invalid=invalid)
return signal
def hampel(signal, winlen=12, T=3, focus='centre'):
"""Performs a Hampel filtering, a median based outlier rejection in which
outliers are detected based on a local median, and are replaced by that
median (local median is determined in a moving window)
arguments
signal -- a vector (i.e. a NumPy array) containing a single
trace of your signal
keyword arguments
winlen -- integer indicating window length (default = 12)
T -- floating point or integer indicating the maximal
distance from the surrounding signal that defines
outliers; distance is measured in a standard deviation
like measure (S0), based on the local median; a T of 3
means that any point outside of the range median-3*S0 to
median+3*S0 is considered an outlier (default = 3)
focus -- string indicating where the focus (i.e. the point that
is being corrected) of the window should be; one of:
'centre' (window = winlen/2 + i + winlen/2)
'left' (window = i + winlen)
'right' (window = winlen + i)
"""
if focus == 'centre':
# half a window length
hampwinlen = winlen/2
for i in range(hampwinlen, len(signal)-hampwinlen+1):
# median for this window
med = numpy.median(signal[i-hampwinlen:i+hampwinlen])
# check S0 (standard deviation like measure)
s0 = 1.4826 * numpy.median(numpy.abs(signal[i-hampwinlen:i+hampwinlen] - med))
# check outliers
if signal[i] > med + T*s0 or signal[i] < med - T*s0:
# replace outliers by median
signal[i] = med
# if the focus is not the centre
else:
# determine the starting position
if focus == 'left':
start = 0
stop = len(signal) - winlen
elif focus == 'right':
start = winlen
stop = len(signal)
else:
start = winlen/2
stop = len(signal) - winlen/2 + 1
# loop through samples
for i in range(start, stop):
# determine window start and stop
if focus == 'left':
wstart = i
wstop = i + winlen
elif focus == 'right':
wstart = i - winlen
wstop = i
else:
wstart = i - winlen/2
wstop = i + winlen/2
# median for this window
med = numpy.median(signal[wstart:wstop])
# check S0 (standard deviation like measure)
s0 = 1.4826 * numpy.median(numpy.abs(signal[wstart:wstop] - med))
# check outliers
if signal[i] > med + T*s0 or signal[i] < med - T*s0:
# replace outliers by median
signal[i] = copy.deepcopy(med)
return signal
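# A minimal usage sketch (the example trace is made up for illustration):
#   trace = numpy.array([10.0, 10.2, 55.0, 9.9, 10.1] * 10)
#   trace = hampel(trace, winlen=6, T=3, focus='centre')
# Interior spikes such as the 55.0 are replaced by the local median; samples
# within half a window of the edges are left untouched.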
def smooth(signal, winlen=11, window='hanning', lencorrect=True):
"""Smooth a trace, based on: http://wiki.scipy.org/Cookbook/SignalSmooth
arguments
signal -- a vector (i.e. a NumPy array) containing a single
trace of your signal
keyword arguments
winlen -- integer indicating window length (default = 11)
window -- smoothing type, should be one of the following:
'flat', 'hanning', 'hamming', 'bartlett', or 'blackman'
(default = 'hanning')
lencorrect -- Boolean indicating if the output (the smoothed signal)
should have the same length as the input (the raw
signal); this is not necessarily so because of data
convolution (default = True)
returns
signal -- smoothed signal
"""
# # # # #
# input errors
# really small window
if winlen < 3:
return signal
# non-integer window length
if type(winlen) != int:
try:
winlen = int(winlen)
except:
raise Exception("Error in pyenalysis.smooth: provided window length ('%s') is not compatible; please provide an integer window length" % winlen)
# wrong type of window
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise Exception("Error in pyenalysis.smooth: windowtype '%s' is not supported; please use one of the following: 'flat', 'hanning', 'hamming', 'bartlett', or 'blackman'" % window)
# wrong signal dimension
if signal.ndim != 1:
raise Exception("Error in pyenalysis.smooth: input is not a single signal trace, but has %d dimensions; please provide a 1-dimension array" % signal.ndim)
# too small a trace
if signal.size < winlen:
raise Exception("Error in pyenalysis.smooth: input signal has too few datapoints (%d) for provided window length (%d)" % (signal.size,winlen))
# # # # #
# smoothing
# slice to concatenation
s = numpy.r_[signal[winlen-1:0:-1],signal,signal[-1:-winlen:-1]]
# this is equivalent to:
# p1 = signal[winlen-1:0:-1].tolist() # first part of signal reversed
# p2 = signal.tolist()
# p3 = signal[-1:-winlen:-1].tolist() # last part of signal reversed
# s = p1 + p2 + p3
# moving average
if window == 'flat':
w = numpy.ones(winlen, 'd')
# bit more sophisticated smoothing algorithms
else:
w = eval("numpy.%s(%d)" % (window,winlen))
# convolve signal, according to chosen smoothing type
smoothed = numpy.convolve(w/w.sum(), s, mode='valid')
# correct length if necessary
if lencorrect:
smoothed = smoothed[(winlen/2-1):(-winlen/2)]
try:
smoothed = smoothed[:len(signal)]
except:
raise Exception("Error in pyenalysis.smooth: output array is too short (len(output)=%d, len(signal)=%d)" % (len(smoothed),len(signal)))
return smoothed
# DEBUG #
if __name__ == '__main__':
from matplotlib import pyplot
# constants
N = 200
INVAL = -1
# create random data
a = numpy.random.random_sample(N)
# manipulate random data to look somewhat realistic
a = 10 + a*5
# introduce some missing values
a[0:10] = INVAL
a[50:66] = INVAL
a[150:190] = INVAL
a[-1] = INVAL
# introduce outliers
for i in [15,16,18,40,197]:
a[i] = a[i] + numpy.random.random()*30
# plot the raw data
pyplot.figure()
pyplot.plot(a,'ko', label='raw')
# smooth the data
# a = smooth(a,winlen=5,lencorrect=True)
# plot the smoothed data
# pyplot.plot(a,'y', label='pre-smooth')
# interpolate 'blinks' (artificial, due to smoothing of fake data and missing)
# a = interpolate_blink(a, mode='auto', velthresh=5, maxdur=500, margin=10)
# plot interpolated data
# pyplot.plot(a,'b', label='blinks_interpolated')
# interpolate missing data
a = interpolate_missing(a,mode='linear',invalid=INVAL)
# plot interpolated data
pyplot.plot(a,'g', label='missing_interpolated')
# remove outliers
a = remove_outliers(a, maxdev=5, invalid=INVAL, interpolate=True, mode='auto')
# plot data without outliers
pyplot.plot(a,'m', label='outliers_removed')
# smooth the data
a = smooth(a,winlen=5,window='hanning',lencorrect=True)
# plot the smoothed data
pyplot.plot(a,'r', label='smooth')
# finish the plot
pyplot.legend(loc='upper right')
pyplot.show()
# # # # #
| esdalmaijer/PyGazeAnalyser | pygazeanalyser/traces.py | Python | gpl-3.0 | 19,865 |
####
##functions for running tune_main.py
##dpaiton
import os
import re
## Generator (most times) function to return a range of values with a float step
## This is inclusive, which is different from Python's range() function
def frange(start, stop, step):
if start == stop or stop == 0 or step == 0:
yield start
else:
r = start
while r <= stop:
yield r
r += step
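## A minimal usage sketch (the values are illustrative only):
##   list(frange(0.0, 1.0, 0.25))   # -> [0.0, 0.25, 0.5, 0.75, 1.0]
## Unlike Python's range(), the stop value itself is included.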
## Function to enable a layer or conn
def enable_block(start_line_num, output_lines):
for line_num in range(start_line_num,0,-1):
if 'disable' in output_lines[line_num]:
output_lines[line_num] = re.sub('true','false',output_lines[line_num],count=0)
break
return output_lines
## Function to uncomment a block of code from the given start line to the first time a blank line is found
def uncomment_block(start_line_num, output_lines):
for line_num in range(start_line_num,len(output_lines)):
com_line = output_lines[line_num]
if com_line == '\n':
return output_lines
output_lines[line_num] = com_line[2:]
return output_lines
| dpaiton/OpenPV | projects/HyPerRetina/tuning/tune_functions.py | Python | epl-1.0 | 1,131 |
import unittest
from webtest import TestApp
import helloworld
import printenv
class TestModules(unittest.TestCase):
nosegae_modules = True
nosegae_user = True
nosegae_user_kwargs = {
'USER_IS_ADMIN': '1',
'USER_EMAIL': 'josh@example.org'
}
def test_frontend(self):
"""Tests the main module in app.yaml"""
frontend = TestApp(helloworld.APP)
response = frontend.get('/')
self.assertEqual(response.content_type, 'text/plain')
self.assertIn('Hello, webapp2 World!', response.body)
def test_mobile_frontend(self):
mobile = TestApp(printenv.APP)
response = mobile.get('/mobile/')
self.assertEqual(response.content_type, 'text/html')
self.assertIn('josh@example.org', response.body)
def test_static_backend(self):
work = TestApp(printenv.APP)
response = work.get('/work/')
self.assertEqual(response.content_type, 'text/html')
self.assertIn('josh@example.org', response.body)
| Trii/NoseGAE | examples/modules_example/test.py | Python | bsd-2-clause | 1,022 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import murano.common.messaging as messaging
CONF = cfg.CONF
def create_rmq_client():
rabbitmq = CONF.rabbitmq
connection_params = {
'login': rabbitmq.login,
'password': rabbitmq.password,
'host': rabbitmq.host,
'port': rabbitmq.port,
'virtual_host': rabbitmq.virtual_host,
'ssl': rabbitmq.ssl,
'ca_certs': rabbitmq.ca_certs.strip() or None
}
return messaging.MqClient(**connection_params)
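# A minimal usage sketch (assumes the rabbitmq options have already been
# registered and loaded into CONF by the murano engine):
#   client = create_rmq_client()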
| sajuptpm/murano | murano/engine/system/common.py | Python | apache-2.0 | 1,076 |
"""This module imports all tests/unittests for the
pyfrp_gmsh_geometry."""
from pyfrp.modules import pyfrp_gmsh_geometry
from pyfrp.modules import pyfrp_misc_module
from pyfrp.modules import pyfrp_gmsh_IO_module
import numpy as np
def test_surfaceFuse():
"""Test surface fuse function.
Reads in .stl, tries to fuse a few surfaces and checks if the new surface's
lineLoop has the proper number of vertices."""
d=pyfrp_gmsh_IO_module.readStlFile(pyfrp_misc_module.fixPath(pyfrp_misc_module.getMeshfilesDir()+"tests/surfaceFuse.stl"))
sfID=1
toFuseIDs=[2,3,4,5,6,7]
# Grab first surface
sf1=d.getRuledSurfaceById(sfID)[0]
# Try to successively fuse
for ID in toFuseIDs:
sf1.fuse(d.getRuledSurfaceById(ID)[0],debug=True)
assert pyfrp_misc_module.objAttrToList(sf1.lineLoop.getVertices(),'Id') == [1, 2, 4, 5, 6, 7, 8, 9, 3]
def test_domainSimplifySurfaces():
"""Test domain's simplify surfaces method.
Reads in .stl, tries to simplify the geometry and checks if
it has been simplified properly."""
d=pyfrp_gmsh_IO_module.readStlFile(pyfrp_misc_module.fixPath(pyfrp_misc_module.getMeshfilesDir()+"tests/surfaceFuse.stl"))
d.simplifySurfaces(triangIterations=0,addPoints=False,fixSurfaces=False,debug=False,iterations=3)
sameNormal=d.getAllObjectsWithProp("ruledSurfaces","normal",np.array([0,-1.,0]))
assert len(sameNormal) == 1
def test_getAllSubElements():
"""Test gmshElement's getAllSubElements.
Creates cuboid domain and checks that sub-elements of first surface
have the right number.
"""
# Create domain
d=pyfrp_gmsh_geometry.domain()
# Add Cuboid
d.addCuboidByParameters([0,0,0],100,150,50,30.,plane="z",genLoops=True,genSurfaces=True,genVol=True)
assert len(d.ruledSurfaces[0].getAllSubElements())==13
| mueller-lab/PyFRAP | pyfrp/tests/modules/test_gmsh_geometry.py | Python | gpl-3.0 | 1,754 |
#!/usr/bin/python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_mpr121 as upmMpr121
def main():
I2C_BUS = upmMpr121.MPR121_I2C_BUS
DEFAULT_I2C_ADDR = upmMpr121.MPR121_DEFAULT_I2C_ADDR
# Instantiate an MPR121 touch sensor on I2C
myTouchSensor = upmMpr121.MPR121(I2C_BUS, DEFAULT_I2C_ADDR)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit,
# including functions from myTouchSensor
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
def printButtons(touchSensor):
buttonPressed = False
outputStr = "Buttons Pressed: "
for i in range(12):
if (touchSensor.m_buttonStates & (1 << i)):
outputStr += (str(i) + " ")
buttonPressed = True
if (not buttonPressed):
outputStr += "None"
print(outputStr)
if (touchSensor.m_overCurrentFault):
print("Over Current Fault detected!")
while(1):
myTouchSensor.readButtons()
printButtons(myTouchSensor)
time.sleep(1)
if __name__ == '__main__':
main()
| whbruce/upm | examples/python/mpr121.py | Python | mit | 2,534 |
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake Risklib is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# OpenQuake Risklib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with OpenQuake Risklib. If not, see
# <http://www.gnu.org/licenses/>.
import unittest
from openquake.risklib import scientific
class RiskCommonTestCase(unittest.TestCase):
def test_compute_bcr(self):
# numbers are proven to be correct
eal_orig = 0.00838
eal_retrofitted = 0.00587
retrofitting_cost = 0.1
interest = 0.05
life_expectancy = 40
expected_result = 0.43405
result = scientific.bcr(
eal_orig, eal_retrofitted, interest,
life_expectancy, 1, retrofitting_cost)
self.assertAlmostEqual(result, expected_result, delta=2e-5)
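# A sketch of where the expected value comes from (the standard benefit-cost
# ratio formula, checked by hand; this is not openquake code):
#   annual_benefit  = eal_orig - eal_retrofitted = 0.00251
#   discount_factor = (1 - exp(-interest * life_expectancy)) / interest ~= 17.293
#   bcr = annual_benefit * discount_factor * asset_value / retrofitting_cost
#       = 0.00251 * 17.293 * 1 / 0.1 ~= 0.434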
| g-weatherill/oq-risklib | openquake/risklib/tests/benefit_cost_ratio_test.py | Python | agpl-3.0 | 1,272 |
# -*- encoding: utf-8 -*-
import os
from abjad.tools.abctools import ContextManager
class TemporaryDirectoryChange(ContextManager):
r'''A context manager for temporarily changing the current working
directory.
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Context managers'
__slots__ = (
'_directory',
'_original_directory',
'_verbose',
)
### INITIALIZER ###
def __init__(self, directory=None, verbose=None):
if directory is None:
pass
elif os.path.isdir(directory):
pass
elif os.path.isfile(directory):
directory = os.path.dirname(directory)
self._directory = directory
self._original_directory = None
if verbose is not None:
verbose = bool(verbose)
self._verbose = bool(verbose)
### SPECIAL METHODS ###
def __enter__(self):
r'''Enters context manager and changes to `directory`.
'''
self._original_directory = os.getcwd()
if self._directory is not None:
os.chdir(self.directory)
if self.verbose:
print('Changing directory to {}.'.format(self.directory))
return self
def __exit__(self, exc_type, exc_value, traceback):
r'''Exits context manager and returns to original working directory.
'''
if self._directory is not None:
os.chdir(self._original_directory)
if self.verbose:
print('Returning to {}.'.format(self.original_directory))
self._original_directory = None
def __repr__(self):
r'''Gets interpreter representation of context manager.
Returns string.
'''
return '<{}()>'.format(type(self).__name__)
### PUBLIC PROPERTIES ###
@property
def directory(self):
r'''Gets temporary directory of context manager.
Returns string.
'''
return self._directory
@property
def original_directory(self):
r'''Gets original directory of context manager.
Returns string.
'''
return self._original_directory
@property
def verbose(self):
r'''Is true if context manager prints verbose messages on entrance and
exit. Otherwise false.
Returns boolean.
'''
return self._verbose
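# A minimal usage sketch (the path below is hypothetical):
#   with TemporaryDirectoryChange('/tmp/scratch', verbose=True):
#       ...  # code here runs with /tmp/scratch as the working directory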
| mscuthbert/abjad | abjad/tools/systemtools/TemporaryDirectoryChange.py | Python | gpl-3.0 | 2,390 |
# -*- coding: utf-8 -*-
## Copyright 2008-2009 Luc Saffre.
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import codecs
from models import Unit, Entry, Units
#from menu import Units
from django.core import serializers
from django.test import TestCase
from django import forms
from lino.utils.validatingmodel import ModelValidationError
# convert a django.forms.util.ErrorDict object to a str
#~ def errordict2str(errordict):
#~ d={}
#~ for fieldname,errorlist in errordict.items():
#~ d[fieldname] = list(*errorlist)
#~ return d
class TestCase(TestCase):
def test01(self):
u1 = Unit(title="First Chapter")
u1.save()
self.assertEqual(u1.seq,1)
u = Unit(title="First Section",parent=u1)
u.save()
self.assertEqual(u.seq,1)
u = Unit(title="Second Section",parent=u1)
u.save()
self.assertEqual(u.seq,2)
self.assertEqual(u.pk,3)
try:
#print "set parent to self"
u.parent=u
u.save()
print "save() : done"
except ModelValidationError,e:
#s="\n".join([e.as_text() for m in e.messages])
self.assertEqual(str(e),"Parent cannot be self")
else:
self.fail("Expected ModelValidationError")
class PkkTestCase(TestCase):
fixtures=[ 'demo' ]
def setUp(self):
for u in Unit.objects.all():
u.save()
def test01(self):
unit=Unit.objects.get(pk=5)
# unit.save()
entries=unit.entry_set.all()
s="\n".join([e.word1 for e in entries])
#print s
self.assertEquals(s.split(), u"""
grand, grande
gentil, gentille
petit, petite
méchant, méchante
sale, sale
propre, propre
est-ce que
oui
non """.split())
#~ def test02(self):
#~ entries=Entry.objects.all()
#~ self.assertEquals(len(entries),192)
#~ entries=Entry.objects.filter(word1__startswith="p")
#~ self.assertEquals(len(entries),20)
#~ s="\n".join([e.word1 for e in entries])
#~ print s
#~ #print "test00",entries
#~ self.assertEquals(s,"""\
#~ père
#~ """)
def test03(self):
# total number of rows in each table:
units=Unit.objects.all()
self.assertEqual(len(units),6)
self.assertEqual(len(Entry.objects.all()),9)
pkk=unit=Unit.objects.get(pk=1)
self.assertEqual(len(pkk.children.all()),1)
#
# prettyprint()
#
s=pkk.prettyprint()
#print s
self.assertEqual(s,u"""\
1. Prantsuse keele kurs algajatele
1.1. Esimene tund
1.1.1. Sissejuhatus
1.1.2. Olema
1.1.3. Esimesed laused
1.1.4. Mees või naine?""")
#
#
#
entries=Entry.objects.filter(word1__startswith="p")
self.assertEquals(len(entries),2)
s="\n".join([e.word1 for e in entries])
#print s
self.assertEquals(s.split(),u"""
petit, petite
propre, propre
""".split())
#
# The first Report
#
rpt=Units()
s=rpt.as_text(column_widths=dict(id=3,title=30,parent=20,seq=3))
#print "\n"+s
self.assertEquals(s.split(),u"""
Units
=====
ID |title |name |parent |seq|format
---+------------------------------+---------+--------------------+---+---------
1 |Prantsuse keele kurs |pkk | |1 |R
|algajatele | | | |
2 |Esimene tund |u1 |1. Prantsuse keele |1 |R
| | |kurs algajatele | |
3 |Sissejuhatus | |1.1. Esimene tund |1 |R
4 |Olema | |1.1. Esimene tund |2 |R
5 |Esimesed laused | |1.1. Esimene tund |3 |R
6 |Mees või naine? | |1.1. Esimene tund |4 |R
""".split(),"Units().as_text() has changed in demo")
# Run these tests using "python manage.py test".
# see http://docs.djangoproject.com/en/dev/topics/testing/#topics-testing
| MaxTyutyunnikov/lino | obsolete/voc/tests.py | Python | gpl-3.0 | 5,073 |
# import bpy
# def draw_wire(context):
# area = context.area
# if context.area.type == 'VIEW_3D':
# target = bpy.context.area.spaces.active
# # target.draw_handler_add()
| TriumphLLC/FashionProject | modules/draw/wires.py | Python | gpl-3.0 | 183 |
"""User API"""
import arrow
from flask import render_template, request
from flask_mail import Message
from werkzeug.security import generate_password_hash, check_password_hash
from purepage.ext import r, db, abort, g, auth, mail
def gen_pwdhash(password):
"""Generate a password hash."""
return generate_password_hash(password, method='pbkdf2:sha256')
class User:
"""
User
$shared:
user:
id?str: User ID
role?str: Role
github?url&optional: GitHub URL
avatar?url&default="http://purepage.org/static/avatar-default.png": Avatar
date_create?datetime&optional: Creation time
date_modify?datetime&optional: Modification time
lastlogin_date?datetime&optional: Last login time
""" # noqa
def post_signup(self, id, email, password):
"""
Sign up
$input:
id?str: User ID
email?email&optional: Email
password?str: Password
$output: @message
$error:
400.IDConflict: User ID already exists
400.EmailConflict: Email already exists
"""
if db.run(r.table("user").get(id)):
abort(400, "IDConflict", "%s already exists" % id)
if email and db.first(r.table("user").get_all(email, index="email")):
abort(400, "EmailConflict", "%s already exists" % email)
db.run(r.table("user").insert({
"id": id,
"email": email,
"pwdhash": gen_pwdhash(password),
"role": "normal",
"date_create": arrow.utcnow().datetime,
"date_modify": arrow.utcnow().datetime,
"timestamp": arrow.utcnow().timestamp
}))
return {"message": "OK"}
def check_password(self, user, password):
if not user:
abort(403, "UserNotFound", "帐号不存在")
if not check_password_hash(user["pwdhash"], password):
abort(403, "WrongPassword", "密码错误")
def post_login(self, account, password):
"""
Log in
$input:
account?str: User ID or email
password?str: Password
$output: @user
$error:
403.UserNotFound: Account does not exist
403.WrongPassword: Wrong password
"""
user = db.run(r.table("user").get(account))
if not user:
user = db.first(r.table("user").get_all(account, index="email"))
if not user:
abort(403, "UserNotFound", "帐号不存在")
self.check_password(user, password)
db.run(
r.table("user")
.get(user["id"])
.update({
"lastlogin_date": arrow.utcnow().datetime,
"lastlogin_ip": request.remote_addr,
"lastlogin_ua": request.headers.get('User-Agent'),
"timestamp": arrow.utcnow().timestamp
})
)
g.token = {"type": "login", "id": user["id"]}
return user
def get_me(self):
"""
Get the current user's own info
$output: @user
"""
return g.user
def get(self, id):
"""
View a user's info
$input:
id?str: ID
$output: @user
"""
user = db.run(r.table("user").get(id))
if not user:
abort(404, "NotFound", "用户不存在")
return user
def put(self, github, avatar):
"""
Update profile
$input:
github?url&optional: GitHub URL
avatar?url&optional: Avatar
$output: @message
"""
db.run(
r.table("user")
.get(g.user["id"])
.update({
"github": github,
"avatar": avatar,
"date_modify": arrow.utcnow().datetime,
"timestamp": arrow.utcnow().timestamp
})
)
return {"message": "OK"}
def put_email(self, email, password):
"""
Change email address
$input:
email?email: Email
password?str: Password
$output: @message
"""
self.check_password(g.user, password)
db.run(r.table("user").get(g.user["id"]).update({
"email": email,
"date_modify": arrow.utcnow().datetime,
"timestamp": arrow.utcnow().timestamp
}))
return {"message": "OK"}
def put_password(self, new_password, password):
"""
Change password
$input:
new_password?str: New password
password?str: Password
$output: @message
"""
self.check_password(g.user, password)
db.run(r.table("user").get(g.user["id"]).update({
"pwdhash": gen_pwdhash(new_password),
"date_modify": arrow.utcnow().datetime,
"timestamp": arrow.utcnow().timestamp
}))
return {"message": "OK"}
def post_forgot(self, email):
"""
Forgot password; the reset token is valid for 2 hours
$input:
email?email: Email
$output: @message
$error:
400.UserNotFound: User does not exist
"""
user = db.first(r.table("user").get_all(email, index="email"))
if not user:
abort(400, "UserNotFound", "用户不存在")
token = auth.encode_token({
"type": "reset",
"id": user["id"],
"exp": arrow.utcnow().replace(hours=2).timestamp,
"timestamp": user["timestamp"]
})
token = token.decode("ascii")
msg = Message("PurePage重置密码", recipients=[email])
msg.html = render_template(
"user-reset.html", token=token, userid=user["id"])
mail.send(msg)
return {"message": "重置链接已发送至邮箱,请查收"}
def post_reset(self, token, password):
"""
Reset password
$input:
token?str: Token from the reset link
password?str: Password
$output: @message
$error:
403.InvalidToken: Invalid token
"""
token = auth.decode_token(token)
if token is None or token.get("type") != "reset":
abort(403, "InvalidToken", "Token无效")
user = db.run(r.table("user").get(token["id"]))
if (
user is None
or "timestamp" not in token
or user["timestamp"] != token["timestamp"]
):
abort(403, "InvalidToken", "Token无效")
db.run(
r.table("user")
.get(token["id"])
.update({
"pwdhash": gen_pwdhash(password),
"timestamp": arrow.utcnow().timestamp
})
)
return {"message": "OK"}
| guyskk/kkblog | api/purepage/views/user.py | Python | mit | 6,802 |
import hmac
from functools import wraps
from flask import make_response
from flask import request
import config
import logger
from middleware import received_message
from middleware import received_postback
from middleware import validate_token
LOGGER = logger.getLogger(__name__)
APP_SECRET = bytes(config.facebook['APP_SECRET'], 'utf-8')
def initalize_routes(app):
if app is None:
raise ValueError('Application is required!')
context_root = '/v1.0/'
app.add_url_rule(context_root + 'webhook', 'webhook_validation', webhook_validation, methods=['GET'])
app.add_url_rule(context_root + 'webhook', 'webhook_callback', webhook_callback, methods=['POST'])
def verify_request_signature(func):
@wraps(func)
def decorated(*args, **kwargs):
signature = request.headers.get('x-hub-signature', None)
if signature:
elements = signature.split('=')
method = elements[0]
signature_hash = elements[1]
expected_hash = hmac.new(APP_SECRET, msg=request.get_data(), digestmod=method).hexdigest()
if signature_hash != expected_hash:
LOGGER.error('Signature was invalid')
return make_response('', 403)
else:
LOGGER.error('Could not validate the signature')
return func(*args, **kwargs)
return decorated
def webhook_validation():
mode = request.args.get('hub.mode')
token = request.args.get('hub.verify_token')
challenge = request.args.get('hub.challenge', '')
if validate_token(mode, token):
LOGGER.info('Token was successfully validated')
return make_response(challenge, 200)
else:
LOGGER.warning('Token was invalid!')
return make_response('', 403)
@verify_request_signature
def webhook_callback():
data = request.json
if data['object'] == 'page':
for page_entry in data['entry']:
page_id = page_entry['id']
time_of_event = page_entry['time']
for message_event in page_entry['messaging']:
if 'optin' in message_event:
LOGGER.info('Webhook received message event: option from page %s at %d', page_id, time_of_event)
elif 'message' in message_event:
received_message(message_event)
elif 'delivery' in message_event:
LOGGER.info('Webhook received message event: delivery from page %s at %d', page_id, time_of_event)
elif 'postback' in message_event:
received_postback(message_event)
elif 'read' in message_event:
LOGGER.info('Webhook received message event: read from page %s at %d', page_id, time_of_event)
elif 'account_linking' in message_event:
LOGGER.info('Webhook received message event: account linking from page %s at %d', page_id,
time_of_event)
else:
LOGGER.info('Webhook received unknown message event: %s from page %s at %d', message_event, page_id,
time_of_event)
return make_response('', 200)
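# A minimal wiring sketch (the Flask application object is assumed; it is not
# created in this module):
#   from flask import Flask
#   app = Flask(__name__)
#   initalize_routes(app)   # registers the GET/POST handlers on /v1.0/webhook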
| nico-arianto/dota2-messenger-platform | route.py | Python | gpl-3.0 | 3,193 |
from setuptools import setup, find_packages
setup(
name='django-wakawaka',
version='0.4.dev7',
description='A super simple wiki app written in Python using the Django Framework',
long_description=open('README.rst').read(),
author='Martin Mahner',
author_email='martin@mahner.org',
url='http://github.com/bartTC/django-wakawaka/tree/master',
packages=find_packages('src', exclude=('wakawaka_project',)),
package_dir = {'': 'src'},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
package_data = {
'wakawaka': [
'templates/wakawaka/*.html',
]
},
zip_safe=False,
)
| pinax-archives/django-wakawaka | setup.py | Python | bsd-3-clause | 911 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
###############################################################################
# chirribackup/actions/SnapshotDelete.py
#
# Delete a snapshot
#
# -----------------------------------------------------------------------------
# Chirri Backup - Cheap and ugly backup tool
# Copyright (C) 2016 Gerardo Garcia Peña <killabytenow@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from chirribackup.exceptions import ChirriException
from chirribackup.Config import CONFIG
from chirribackup.Logger import logger
import chirribackup.actions.BaseAction
import chirribackup.LocalDatabase
import chirribackup.snapshot
import sys
class SnapshotDelete(chirribackup.actions.BaseAction.BaseAction):
help = {
"synopsis": "Delete snapshot",
"description": [
"Delete snapshot and data related to it.",
],
"args": [
[ "{snapshot_id}",
"The snapshot_id of the selected snapshot."
],
]
}
def parse_args(self, argv):
return {
"snapshot_id": int(argv.pop(0)),
}
def go(self, snapshot_id):
self.ldb = chirribackup.LocalDatabase.LocalDatabase(CONFIG.path)
chirribackup.snapshot.Snapshot(self.ldb).load(snapshot_id).delete()
| killabytenow/chirribackup | chirribackup/actions/SnapshotDelete.py | Python | gpl-3.0 | 2,024 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=50),
),
migrations.AddField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=50),
),
]
| taedori81/e-commerce-template | saleor/userprofile/migrations/0002_auto_20150902_1728.py | Python | bsd-3-clause | 571 |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2013-2016 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""Insert a simple polarized point source model into a dataset.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('Config polmodel polmodel_cli').split ()
import six, numpy as np, tempfile
from six.moves import range
from ...astutil import A2R, D2R, sphdist
from ...cli import check_usage, die
from ...io import Path
from ...kwargv import ParseKeywords, Custom
from . import util
from .util import sanitize_unicode as b
polmodel_doc = \
"""
casatask polmodel vis=<MS> field=<field specification>
Insert polarization information for a model into a Measurement Set. Uses a
built-in table of polarization properties to generate Stokes QUV information
from CASA's built-in Stokes I models.
The only currently supported source is 3C286 in C band.
"""
class Config (ParseKeywords):
vis = Custom (str, required=True)
field = Custom (str, required=True)
class PolSource (object):
name = None # fed to imager.predictcomp(objname)
ra = None # rad
dec = None # rad
models = None
def __init__ (self, **kwargs):
self.__dict__.update (kwargs)
class PolModel (object):
name = None
fmin = None # GHz
fmax = None # GHz
polfrac = None # [0,1]
polpa = None # degr
def __init__ (self, **kwargs):
self.__dict__.update (kwargs)
def getquv (self, i):
# In the future, we might have different models that derive QUV
# from I in a different way. Probably won't, though.
a = self.polpa * D2R
p = i * self.polfrac
return p * np.cos (a), p * np.sin (a), 0.
position_tolerance = 1 * A2R
fluxscale_standard = 'Perley-Butler 2010'
sources = [
PolSource (name='3C286', ra=-2.74392753, dec=0.53248521,
models=[PolModel (name='C', fmin=3.9, fmax=8.1, polfrac=0.112, polpa=66.)])
]
def polmodel (cfg):
ms = util.tools.ms ()
tb = util.tools.table ()
im = util.tools.imager ()
cl = util.tools.componentlist ()
vis = Path (cfg.vis)
# Set up MS selection so we know what data we actually care about.
ms.open (b(cfg.vis))
ms.msselect (b(dict (field=cfg.field)))
rangeinfo = ms.range (b'data_desc_id field_id'.split ())
ddids = rangeinfo['data_desc_id']
fields = rangeinfo['field_id']
# Check that we know the field and pull up its model
if fields.size != 1:
die ('selection should pick exactly one field, but got %d', fields.size)
tb.open (b(vis / 'FIELD'))
refdir = tb.getcell (b'REFERENCE_DIR', fields[0])
tb.close ()
if refdir.shape[1] != 1:
die ('selected field %s has a time-variable reference direction, which I can\'t handle', cfg.field)
ra, dec = refdir[:,0]
for source in sources:
if sphdist (dec, ra, source.dec, source.ra) < position_tolerance:
break
else:
die ('found no match in my data table for field %s', cfg.field)
# Now we can get the spws and check that we have a model for them.
tb.open (b(vis / 'DATA_DESCRIPTION'))
ddspws = tb.getcol (b'SPECTRAL_WINDOW_ID')
tb.close ()
spws = list (set (ddspws[ddid] for ddid in ddids))
freqranges = {}
models = {}
allfreqs = []
tb.open (b(vis / 'SPECTRAL_WINDOW'))
for spw in spws:
freqs = tb.getcell (b'CHAN_FREQ', spw)
freqranges[spw] = (freqs[0], freqs[-1])
allfreqs += [freqs[0], freqs[-1]]
for model in source.models:
if freqs[0] >= model.fmin * 1e9 and freqs[-1] <= model.fmax * 1e9:
models[spw] = model
break
else:
die ('spw %d is out of frequency bounds for all of my models of '
'field %s (%s): spw range is (%f, %f) GHz', spw, cfg.field,
source.name, freqs[0] * 1e-9, freqs[-1] * 1e-9)
tb.close ()
# Now it's worth using predictcomp() to get the Stokes I fluxes.
workdir = tempfile.mkdtemp (prefix='mspolmodel')
try:
cp = im.predictcomp (
objname = b(source.name),
standard = b(fluxscale_standard),
freqs = allfreqs,
pfx = b(workdir + '/')
)
cl.open (cp)
if cl.length () != 1:
die ('expected one component in predicted list; got %d (%s)',
cl.length (), cp)
stokesi = cl.getcomponent (0)['spectrum']['ival']
# log=False: we'll have to run the risk that the user won't be aware that
# we closed the component list structure. Scary.
cl.close (log=False)
finally:
Path (workdir).rmtree ()
# And now we have everything we need. Invoke setjy() a bunch.
im.open (b(vis), usescratch=False)
for i, spw in enumerate (spws):
model = models[spw]
f1, f2 = freqranges[spw]
i1, i2 = stokesi[i*2:i*2+2]
spindex = np.log (i2 / i1) / np.log (f2 / f1)
q, u, v = model.getquv (i1)
reffreq = '%.3fMHz' % (f1 * 1e-6)
#print ('%2d/%d: %d %.3f-%.3f %.3f-%.3f [%.3f %.3f %.3f %3f] %.3f %s' \
# % (i + 1, len (spws), spw, f1*1e-9, f2*1e-9, i1, i2,
# i1, q, u, v, spindex, reffreq))
im.setjy (
field = b(cfg.field),
spw = b(str(spw)),
modimage = b'',
fluxdensity = [i1, q, u, v],
spix = spindex,
standard = b(fluxscale_standard),
scalebychan = True,
reffreq = b(reffreq),
)
im.close ()
def polmodel_cli (argv):
check_usage (polmodel_doc, argv, usageifnoargs=True)
cfg = Config ().parse (argv[1:])
polmodel (cfg)
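# A minimal invocation sketch (the dataset and field names are hypothetical,
# and a working CASA environment is required):
#   polmodel_cli (['polmodel', 'vis=mydata.ms', 'field=3C286'])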
| pkgw/pwkit | pwkit/environments/casa/polmodel.py | Python | mit | 5,781 |
import pyfits
import numpy as np
import psycopg2 as mdb
import csv
import os
import fnmatch
import cPickle as pickle
from scipy import spatial
import ftputil
import getpass
import copy
from sclip.sclip import sclip
from scipy.interpolate import UnivariateSpline
import scipy
from scipy import signal
#from pyflann import *
class spectrum:
def __init__(self, name, kind='norm', extension=4, wavelength='default', linearize=True):
#set atributes
if isinstance(name, basestring):
pass
else:
name=str(int(name))
self.name = name
self.ccd=int(self.name[-1])
self.date=int(self.name[:6])
self.run=int(self.name[6:10])
self.combine_method=int(self.name[10:12])
self.pivot=int(self.name[12:15])
#read spectrum
self.l=-1
self.f=-1
self.fe=-1
if setup.folder_is_root:
if name[10:12]=='01':
path=setup.folder+str(self.date)+'/standard/com/'+self.name+'.fits'
elif name[10:12]=='02':
path=setup.folder+str(self.date)+'/standard/com2/'+self.name+'.fits'
else:
path=None
else:
path=setup.folder+self.name+'.fits'
try:#read if it exists
hdulist = pyfits.open(path)
except:#otherwise download
if setup.download:
print ' - Spectrum %s not found. Searching in downloaded spectra.' % self.name
try:
path=setup.folder+self.name+'.fits'
hdulist = pyfits.open(path)
print ' + Spectrum %s already downloaded.' % self.name
except:
print ' - Spectrum %s not found. Downloading from the ftp.' % self.name
try:
with ftputil.FTPHost('site-ftp.aao.gov.au', 'galah', getpass.getpass()) as host:
if self.combine_method>=1:
host.download('reductions/Iraf_5.0/%s/combined/%s.fits' % (self.date, self.name), setup.folder+self.name+'.fits')
else:
host.download('reductions/Iraf_5.0/%s/individual/%s.fits' % (self.date, self.name), setup.folder+self.name+'.fits')
path=setup.folder+self.name+'.fits'
hdulist = pyfits.open(path)
print ' + Spectrum %s successfully downloaded.' % self.name
except:
print ' + Spectrum %s failed to download.' % self.name
else:
print ' - Spectrum %s not found. Enable download to get it from the ftp site.' % self.name
#set l, f, and fe
instance={'norm':4, 'normalized':4, 'normalised':4, 'flux':0, 'fluxed':0}
#set radial velocity if it will be needed in the future:
#if is here because reading a spectrum is faster if read in its original velocity frame
if (wavelength=='observer' and instance[kind]==4) or (wavelength=='object' and instance[kind]<4) or instance[kind]==4:
con=setup.con
if con!='':
cur=con.cursor()
cur.execute("select rv_guess from sobject_iraf_53 where sobject_id=%s" % self.name[:-1])
try:
self.v=float(cur.fetchone()[0])
except TypeError:
print ' ! Warning: no velocity in the database. Assuming v=0.'
self.v=0.0
else:
try:
self.v=float(setup.db_dict[self.name[:-1]]['rv_guess'])
except:
print ' ! Warning: no velocity in the database. Assuming v=0.'
self.v=0.0
try:
self.f=hdulist[instance[kind]].data
if instance[kind]==4:
self.fe=hdulist[1].data
else:
self.fe=hdulist[instance[kind]+1].data
crval=hdulist[instance[kind]].header['CRVAL1']
crdel=hdulist[instance[kind]].header['CDELT1']
self.l=np.linspace(crval, crval+crdel*len(self.f), len(self.f))
if instance[kind]==4:
#because the normalized spectrum doesn't have its own error, we use the fluxed error, but have to shift and interpolate it to the normalized l:
crval=hdulist[1].header['CRVAL1']
crdel=hdulist[1].header['CDELT1']
naxis=hdulist[1].header['NAXIS1']
error_l=np.linspace(crval, crval+crdel*naxis, naxis)
error_l=error_l*(1-self.v/299792.458)
self.fe=np.interp(self.l,error_l,self.fe)
except:
raise RuntimeError('Cannot read spectrum. Fits extension might be missing.')
#shift into correct velocity frame
if wavelength=='default':
pass
elif wavelength=='observer' and instance[kind]==4:
self.l=self.l*(1+self.v/299792.458)
elif wavelength=='object' and instance[kind]<4:
self.l=self.l*(1-self.v/299792.458)
else:
pass
#linearize
if linearize==True:
self.f=np.interp(np.linspace(self.l[0],self.l[-1],num=len(self.l)),self.l,self.f)
self.fe=np.interp(np.linspace(self.l[0],self.l[-1],num=len(self.l)),self.l,self.fe)
else:
pass
def linearize(self):
"""
take whatever the sampling is and linearize it
"""
self.f=np.interp(np.linspace(self.l[0],self.l[-1],num=len(self.l)),self.l,self.f)
self.fe=np.interp(np.linspace(self.l[0],self.l[-1],num=len(self.l)),self.l,self.fe)
self.l=np.linspace(self.l[0],self.l[-1],num=len(self.l))
def logarize(self):
"""
take whatever the sampling is and make a logarithmic sampling
"""
self.f=np.interp(np.logspace(np.log10(self.l[0]),np.log10(self.l[-1]),num=int(len(self.l)*1.15)),self.l,self.f)
self.fe=np.interp(np.logspace(np.log10(self.l[0]),np.log10(self.l[-1]),num=int(len(self.l)*1.15)),self.l,self.fe)
self.l=np.logspace(np.log10(self.l[0]),np.log10(self.l[-1]),num=int(len(self.l)*1.15))
def shift(self, rv, linearize=True):
"""
shift the spectrum by a radial velocity rv, given in km/s
if linearize=True, the returned wavelength array will be linearized and the flux interpolated
"""
l=self.l*(1+rv/299792.458)
if linearize==True:
self.f=np.interp(np.linspace(l[0],l[-1],num=len(l)),l,self.f)
self.fe=np.interp(np.linspace(l[0],l[-1],num=len(l)),l,self.fe)
self.l=np.linspace(l[0],l[-1],num=len(l))
else:
self.l=l
return self
def add_noise(self, snr, target_snr,skip=True):
"""
Adds Poissonian noise to turn a spectrum with signal-to-noise ratio snr into a spectrum with target_snr.
"""
if skip and target_snr>snr:#do not raise error if target_snr is larger than snr. Do nothing instead
return self
if target_snr>snr:
raise RuntimeError('Target SNR cannot be larger than the original SNR.')
elif target_snr==snr:
return self
else:
sigma=np.sqrt((1.0/target_snr)**2-(1.0/snr)**2)
noise=np.random.poisson((1.0/sigma)**2, size=len(self.f))
noise=noise/((1.0/sigma)**2)
self.f+=noise
self.f=self.f-1.0
self.fe+=1.0/noise
self.fe=self.fe-1.0
return self
def interpolate(self,space):
"""
interpolate the spectrum to a wavelength space defined in space.
"""
space=np.array(space)
self.f=np.interp(space,self.l,self.f)
self.fe=np.interp(space,self.l,self.fe)
self.l=space
return self
def normalize(self,deg,n,func,sl,su,grow=0,smooth=4e6):
"""
calculate the normalization for the spectrum and normalize
"""
functions.deg=deg
if func=='cheb' or func=='chebyshev':
result=sclip((self.l,self.f),chebyshev,int(n),su=su,sl=sl,min_data=100,verbose=False)
self.f=self.f/result[0]
if func=='poly':
result=sclip((self.l,self.f),poly,int(n),su=su,sl=sl,min_data=100,verbose=False)
self.f=self.f/result[0]
if func=='spline':
functions.smooth=smooth
result=sclip((self.l,self.f),spline,int(n),su=su,sl=sl,min_data=100,verbose=False)
self.f=self.f/result[0]
return result[0]
def convolve(self,fwhm,extend=False):
"""
decrease the resolution by convolving the spectrum with a Gaussian
returns the kernel
"""
#check if wavelength calibration is linear:
if (self.l[1]-self.l[0])==(self.l[-1]-self.l[-2]):
linear=True
else:
l=self.l
self.linearize()
linear=False
step=self.l[1]-self.l[0]
kernel=gauss_kern(fwhm/step)
add_dim=len(kernel)
if extend==True:
f=np.insert(self.f,0,np.ones(add_dim)*self.f[0])
f=np.append(f,np.ones(add_dim)*self.f[-1])
fe=np.insert(self.fe,0,np.ones(add_dim)*self.fe[0])
fe=np.append(fe,np.ones(add_dim)*self.fe[-1])
self.f=signal.fftconvolve(f,kernel,mode='same')[add_dim:-add_dim]
self.fe=signal.fftconvolve(fe**2,kernel,mode='same')[add_dim:-add_dim]
self.fe=np.sqrt(self.fe)
else:
self.f=signal.fftconvolve(self.f,kernel,mode='same')
self.fe=signal.fftconvolve(self.fe**2,kernel,mode='same')
self.fe=np.sqrt(self.fe)
if linear==False:
self.interpolate(l)
return kernel
def res_degradation(self,r,target_r):
"""
degrade the resolution from resolving power r to resolving power target_r
"""
if r==target_r:
pass
elif target_r>r:
raise RuntimeError('Cannot increase the resolution.')
else:
l=np.average(self.l)
s=l/r
s_target=l/target_r
s_conv=np.sqrt(s_target**2-s**2)
self.convolve(s_conv,extend=True)
def equalize_resolution(self):
"""
convolves a spectrum with a kernel of variable width. Works by warping the data, performing the convolution and unwarping the data, so it is (mostly) vectorized and fast
"""
#check if the correct resolution map is already in the memory:
if self.ccd==1:
if resolution_maps.map_ccd_1==False:
resolution_maps(self.ccd)
map_l,map_f=resolution_maps.map_ccd_1
else:
map_l,map_f=resolution_maps.map_ccd_1
if self.ccd==2:
if resolution_maps.map_ccd_2==False:
resolution_maps(self.ccd)
map_l,map_f=resolution_maps.map_ccd_2
else:
map_l,map_f=resolution_maps.map_ccd_2
if self.ccd==3:
if resolution_maps.map_ccd_3==False:
resolution_maps(self.ccd)
map_l,map_f=resolution_maps.map_ccd_3
else:
map_l,map_f=resolution_maps.map_ccd_3
if self.ccd==4:
if resolution_maps.map_ccd_4==False:
resolution_maps(self.ccd)
map_l,map_f=resolution_maps.map_ccd_4
else:
map_l,map_f=resolution_maps.map_ccd_4
#check if wavelength calibration is linear:
if (self.l[1]-self.l[0])==(self.l[-1]-self.l[-2]):
linear=True
else:
l=self.l
self.linearize()
linear=False
#extract the correct pivot number from the map:
map_f=map_f[self.pivot-1]
#current sampling:
sampl=self.l[1]-self.l[0]
#the target sigma corresponds to R=22000. We want a constant sigma, not a constant R, so take the sigma that corresponds to R=22000 at the average wavelength
s_target=np.ones(len(map_l))*np.average(map_l)/22000.
#the sigma of the kernel is:
s=np.sqrt(s_target**2-np.divide(map_l,map_f)**2)
#fit it with the polynomial, so we have a function instead of sampled values:
map_fit=np.poly1d(np.polyfit(map_l, s/sampl, deg=6))
#create an array with new sampling. The first point is the same as in the spectrum:
l_new=[map_l[0]]
#and the sigma in pixels with which to convolve is
sampl=self.l[1]-self.l[0]
s_new=s/sampl/min(s/sampl)
#now create the non linear sampling. It should be finer where sigma is larger and sparser where sigma is smaller
#IS THERE A WAY TO VECTORIZE THIS???
for i in range(int(len(self.l)*np.max(s_new)*min(s/sampl))):
l_new.append(l_new[i]+sampl/map_fit(l_new[i]))
#interpolate the spectrum to the new sampling:
new_f=np.interp(np.array(l_new),self.l,self.f)
new_fe=np.interp(np.array(l_new),self.l,self.fe)
#the kernel is min(s/sampl) pixels wide, so we oversample as little as possible (for the very best precision we should oversample more, but this takes time)
kernel=gauss_kern(min(s/sampl))
#convolve the warped spectrum:
con_f=signal.fftconvolve(new_f,kernel,mode='same')
con_fe=signal.fftconvolve(new_fe**2,kernel,mode='same')
#inverse the warping:
self.f=np.interp(self.l,np.array(l_new),con_f)
self.fe=np.interp(self.l,np.array(l_new),con_fe)
self.fe=np.sqrt(self.fe)
if linear==False:
self.interpolate(l)
return self
def median_filter(self,size, extend=False):
"""
do a standard median filtering. Give the size in Angstroms; it will be translated to the nearest odd number of pixels.
"""
#check if wavelength calibration is linear:
if (self.l[1]-self.l[0])==(self.l[-1]-self.l[-2]):
linear=True
else:
linear=False
l=self.l
self.linearize()
step=self.l[1]-self.l[0]
add_dim=int(np.ceil(size/step // 2 * 2 + 1))
if extend==True:
f=np.insert(self.f,0,np.ones(add_dim)*self.f[0])
f=np.append(f,np.ones(add_dim)*self.f[-1])
fe=np.insert(self.fe,0,np.ones(add_dim)*self.fe[0])
fe=np.append(fe,np.ones(add_dim)*self.fe[-1])
self.f=signal.medfilt(f,add_dim)[add_dim:-add_dim]
self.fe=signal.medfilt(fe,add_dim,)[add_dim:-add_dim]
self.fe=self.fe/np.sqrt(add_dim)
else:
self.f=signal.medfilt(self.f,add_dim)
self.fe=signal.medfilt(self.fe,add_dim)
self.fe=self.fe/np.sqrt(add_dim)
if linear==False:
self.interpolate(l)
def knn(self,method='FLANN', K=10, d='euclidean', windows='', pickle_folder='pickled_spectra'):
"""
find nearest neighbours. spectra2pickle must be run first
windows is a filename with the description of windows to use or a 1D np.ndarray
"""
if pspectra.names==0:
pspectra(pickle_folder)
spectra=pspectra.spectra
names=pspectra.names
l=pspectra.space
f=np.interp(l,self.l,self.f)
#check if window function already exists:
if type(windows).__name__=='ndarray':#if given as an array only check if format is right
if type(windows).__name__!='ndarray':
raise RuntimeError('windows is not type numpy.ndarray')
if len(windows)!=len(l):
raise RuntimeError('windows has wrong size')
window=windows
window_function.window=windows
window_function.l=l
elif len(window_function.window)==0:#if it doesnt exist create it
window_function(l, windows)
window=window_function.window
else:#if it exists just use it
window=window_function.window
#create a mask where window==0, because there we don't have to compare the vectors; the difference is always 0:
mask=np.array(window,dtype=bool)
if method=='FLANN':
from pyflann import flann_index
distance={'manhattan': 'manhattan', 'euclidean': 'euclidean'}
if flann_index.flann==False:
flann_index(spectra[:,mask]*window[mask], distance[d])
ind,dist=flann_index.flann.nn_index(f[mask]*window[mask],K,checks=flann_index.index['checks'])
if distance[d]=='euclidean':
return names[ind], np.sqrt(dist)
else:
return names[ind], dist
if method=='KDTree':
distance={'manhattan': 1, 'euclidean': 2}
if kdtree_index.index==False:
kdtree_index(spectra[:,mask]*window[mask])
dist,ind=kdtree_index.index.query(f[mask]*window[mask],K,p=distance[d])
return names[ind], dist
def save_fits(self, fname=None):
"""
save the spectrum into a 2D fits file
"""
if fname==None:
fname=setup.folder+self.name+'.fits'
hdu = pyfits.PrimaryHDU(self.f)
hdu.writeto(fname)
hdulist = pyfits.open(fname,mode='update')
prihdr = hdulist[0].header
prihdr['COMMENT']='File written by galah_tools.py'
prihdr.set('CRVAL1', self.l[0])
prihdr.set('CDELT1', self.l[1]-self.l[0])
prihdr.set('CRPIX1', 1)
prihdr.set('CUNIT1', 'Angstroms')
hdulist.flush()
hdulist.close()
pyfits.append(fname,self.fe)
hdulist = pyfits.open(fname,mode='update')
prihdr = hdulist[1].header
prihdr['COMMENT']='File written by galah_tools.py'
prihdr.set('CRVAL1', self.l[0])
prihdr.set('CDELT1', self.l[1]-self.l[0])
prihdr.set('CRPIX1', 1)
prihdr.set('CUNIT1', 'Angstroms')
hdulist.flush()
hdulist.close()
def save_ascii(self, fname=None):
"""
save the spectrum into an ascii text file with three columns; wavelength, flux, error
"""
if fname==None:
fname=setup.folder+self.name+'.txt'
np.savetxt(fname,zip(self.l,self.f,self.fe))
class pspectra:
spectra=0
names=0
space=[]
def __init__(self,pickle_folder):
if pickle_folder[-1]=='/': pickle_folder=pickle_folder[:-1]
try:
pspectra.space=pickle.load(open('%s/space.pickle' % (pickle_folder), 'rb'))
#f=np.interp(space,self.l,self.f)
#l=space
except:
raise RuntimeError('Pickle spectra first if you want to use nearest neighbour search. No wavelength sampling is saved.')
blocks=[]
for path, dirs, files in os.walk(os.path.abspath(pickle_folder)):
for filename in fnmatch.filter(files, 'b*.pickle'):
blocks.append(filename)
if len(blocks)==0:
raise RuntimeError('Pickle spectra first if you want to use nearest neighbour search. No pickled spectra saved.')
spectra_frompickle=[]
for i in blocks:
spectra_frompickle=spectra_frompickle+pickle.load(open('%s/%s' % (pickle_folder,i), 'rb'))
names=[]
spectra=[]
for i in spectra_frompickle:
names.append(i[0])
spectra.append(i[1])
pspectra.spectra=np.array(spectra)
pspectra.names=np.array(names)
class window_function:
window=[]
l=[]
def __init__(self,l,windows):
if windows=='':
window_function.window=np.array([1]*len(l))#if no file is given window function is constant 1
else:
window_function.window=read_windows(windows,l)
window_function.l=l
def plot_window(self):
"""
Plot the window function
"""
import matplotlib.pyplot as pl
fig=pl.figure('Window function (close to continue)')
pl.plot(window_function.l, window_function.window,'k-')
pl.xlabel('Wavelength / Angstroms')
pl.ylabel('Window value')
pl.title('Window function')
pl.show()
def clear(self):
"""
Clears a saved window function
"""
window_function.window=[]
window_function.l=[]
class functions:
#holds the fitting parameters; normalize() sets deg and smooth before the fit functions read them
deg=0
smooth=1000
class flann_index:
flann=False
index=0
def __init__(self, spectra, d):
set_distance_type(d)
flann_index.flann=FLANN()
flann_index.index=flann_index.flann.build_index(spectra, algorithm='autotuned', target_precision=0.9)
def clear(self):
flann_index.flann=False
flann_index.index=0
class kdtree_index:
index=False
def __init__(self, spectra):
kdtree_index.index=spatial.cKDTree(spectra)
def clear(self):
kdtree_index.index=False
class resolution_maps:
map_ccd_1=False
map_ccd_2=False
map_ccd_3=False
map_ccd_4=False
def __init__(self, ccd):
hdulist = pyfits.open('resolution_maps/ccd%s_piv.fits' % ccd)
res=hdulist[0].data
crval=hdulist[0].header['CRVAL1']
crdel=hdulist[0].header['CDELT1']
naxis=hdulist[0].header['NAXIS1']
l=np.linspace(crval, crval+crdel*naxis, naxis)
hdulist.close()
if ccd==1:
resolution_maps.map_ccd_1=(l,res)
if ccd==2:
resolution_maps.map_ccd_2=(l,res)
if ccd==3:
resolution_maps.map_ccd_3=(l,res)
if ccd==4:
resolution_maps.map_ccd_4=(l,res)
class setup:
folder=''
folder_is_root=False
con=''
csv=''
db_dict={}
download=False
def __init__(self, **kwargs):
for key in kwargs:
if key=='folder':
setup.folder=kwargs['folder']
if setup.folder[-1]!='/':
setup.folder=setup.folder+'/'
elif key=='root_folder':
setup.folder=kwargs['root_folder']
setup.folder_is_root=True
if setup.folder[-1]!='/':
setup.folder=setup.folder+'/'
if key=='con':
setup.con=kwargs['con']
if key=='download':
setup.download=kwargs['download']
if key=='csv':
setup.csv=kwargs['csv']
reader = csv.DictReader(open(setup.csv))
for row in reader:
key = row.pop('sobject_id')
if key in setup.db_dict:
pass
setup.db_dict[key] = row
def read_windows(filename,l):
windows=[]
cwindows=[]
#initially the window is set to constant 1
window=np.ones(len(l))
#read the file and save windows separately: those specified by a lower and upper limit and those specified by a centre and width
writeto=0
for line in open(filename, 'r'):
if len(line[:-1])==0:
writeto=1
continue
if line[0]=='#': pass
else:
data=line[:-1].split('\t')
sign=data[-1][0]
data=map(float,data)
data=data+[sign]
if writeto==0:
windows.append(data)
else:
cwindows.append(data)
#transform one format into the other
for i in windows:
cwindows.append([(i[0]+i[1])/2.0, i[1]-i[0], i[2], i[3], i[4]])
for w in cwindows:
if w[4]!='-':
for n,i in enumerate(l):
if abs(w[0]-i)<=(w[1]/2.0*(1-w[2])): window[n]*=w[3]
elif (w[0]+w[1]/2.0*(1-w[2]))<i<(w[0]+w[1]/2.0): window[n]*=(2-2*w[3])/(w[1]*w[2])*i+1-(1-w[3])/w[2]*(2*w[0]/w[1]+1)
elif (w[0]-w[1]/2.0*(1-w[2]))>i>(w[0]-w[1]/2.0): window[n]*=(2*w[3]-2)/(w[1]*w[2])*i+1-(w[3]-1)/w[2]*(2*w[0]/w[1]-1)
else: pass
else:
for n,i in enumerate(l):
if abs(w[0]-i)>=(w[1]/2.0): window[n]*=abs(w[3])
elif (w[0]+w[1]/2.0*(1-w[2]))<i<(w[0]+w[1]/2.0): window[n]*=(2*abs(w[3])-2)/(w[1]*w[2])*i+abs(w[3])-(abs(w[3])-1)/w[2]*(1+2.0*w[0]/w[1])
elif (w[0]-w[1]/2.0*(1-w[2]))>i>(w[0]-w[1]/2.0): window[n]*=(2-2*abs(w[3]))/(w[1]*w[2])*i+abs(w[3])-(1-abs(w[3]))/w[2]*(2.0*w[0]/w[1]-1)
else: pass
return window
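#a hypothetical windows file consistent with the parser above (tab separated, all values made up):
#the first block lists windows as lower/upper limits, then an empty line, then a second block lists
#windows as centre/width. The third column appears to be the fraction of the window used for the
#linear taper and the last column the multiplicative window value, whose sign selects normal or
#inverse mode:
#
# 4850.0	4870.0	0.1	0.5
#	(empty line switches to the centre/width block)
# 6562.8	5.0	0.2	-0.1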
def read(name, **kwargs):
"""
reads one spectrum
"""
spec=spectrum(name, **kwargs)
return spec
def spectra2pickle(spectra=[], ccd=1, space=[], limit=999999999999, pickle_folder='pickled_spectra'):
"""
Translate all spectra into blocks of pickle objects. Blocks are used because only 10000 spectra can be stored in one object.
After the pickle blocks are read they can be combined into a single table again.
This is used by nearest neighbour search, because spectra must be read fast.
"""
if pickle_folder[-1]=='/': pickle_folder=pickle_folder[:-1]
#using pickle to store data can be dangerous. Display warning
print ' ! Warning: using pickle to store data can be dangerous. Use at your own risk!'
#check if defined space is valid for the given ccd:
if ccd==1:
if space==[]: space=np.linspace(4713,4903,4100)
if not any(4713<i<4903 for i in space): raise RuntimeError('Space is out of boundary for ccd 1')
if ccd==2:
if space==[]: space=np.linspace(5600,5875,4100)
if not any(5600<i<5875 for i in space): raise RuntimeError('Space is out of boundary for ccd 2')
if ccd==3:
if space==[]: space=np.linspace(6477,6740,4100)
if not any(6477<i<6740 for i in space): raise RuntimeError('Space is out of boundary for ccd 3')
if ccd==4:
if space==[]: space=np.linspace(7584,7887,4100)
if not any(7584<i<7887 for i in space): raise RuntimeError('Space is out of boundary for ccd 4')
#create a folder to store pickled spectra:
if not os.path.exists('pickled_spectra'):
os.makedirs('pickled_spectra')
#create a list of spectra:
if spectra==[]:
for path, dirs, files in os.walk(os.path.abspath(setup.folder)):
for filename in fnmatch.filter(files, '*%s.fits' % (ccd)):
spectra.append(filename[:-5])
#read and interpolate spectra one by one:
block=[]
block_num=0
n=0
nn=0
for i in spectra:
if n==9999:#only 10000 spectra can be saved in one file
pickle.dump(block,open('%s/b%s.pickle' % (pickle_folder,block_num), 'wb'))
block_num+=1
n=0
block=[]
try:#if there is no normalized spectrum skip it
s=read(i, kind='norm', wavelength='default').interpolate(space)
block.append([i,s.f])
n+=1
except RuntimeError:
pass
nn+=1
if nn>=limit: break# stop reading spectra if limit is reached (to spare the memory or time when testing)
if nn%10 == 0: print nn, '/', len(spectra), "pickled."
pickle.dump(block,open('%s/b%s.pickle' % (pickle_folder,block_num+1), 'wb'))
pickle.dump(space,open('%s/space.pickle' % (pickle_folder), 'wb'))
return space
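#example (hypothetical grid; assumes setup() has already been called so setup.folder is set):
# space = spectra2pickle(ccd=3, space=np.linspace(6480, 6730, 2000))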
def cc(i1,i2):
"""
Cross-correlate the spectrum or list of spectra i1 with the spectrum i2.
Returns the shift between the spectra (an array if i1 is a list) in km/s
"""
s1=copy.deepcopy(i1)
s2=copy.deepcopy(i2)
s2.logarize()
if isinstance(s1, spectrum): s1=[s1]
ccs=[]
for s in s1:
s.interpolate(s2.l)
ccf=np.correlate(s.f[10:-10]-np.average(s.f[10:-10]),s2.f[10:-10]-np.average(s2.f[10:-10]),mode='same')
max_index=np.argmax(ccf)
max_fit=np.polyfit(range(max_index-3,max_index+4),ccf[max_index-3:max_index+4],2)
max_fit= -max_fit[1]/(max_fit[0]*2.0)
diff=max_fit-len(ccf)/2.0
dl=s.l[1]-s.l[0]
ccs.append(dl*diff/s.l[0]*299792.458)
if len(ccs)==1:
return ccs[0]
else:
return np.array(ccs)
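#example (illustrative only; s and template are spectrum objects previously read with read()):
# rv = cc(s, template)  # relative shift in km/s (an array if s is a list of spectra)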
def chebyshev(p,ye,mask):
coef=np.polynomial.chebyshev.chebfit(p[0][mask], p[1][mask], functions.deg)
cont=np.polynomial.chebyshev.chebval(p[0],coef)
return cont
def poly(p,ye,mask):
r=np.polyfit(p[0][mask], p[1][mask], deg=functions.deg)
f=np.poly1d(r)
return f(p[0])
def spline(p,ye,mask):
spl = UnivariateSpline(p[0][mask], p[1][mask],k=functions.deg)
spl.set_smoothing_factor(5000000)
return spl(p[0])
def gauss_kern(fwhm):
""" Returns a normalized 1D gauss kernel array for convolutions """
size=2*(fwhm/2.355)**2
size_grid = int(size) # we limit the size of kernel, so it is as small as possible (or minimal size) for faster calculations
if size_grid<7: size_grid=7
x= scipy.mgrid[-size_grid:size_grid+1]
g = scipy.exp(-(x**2/float(size)))
return g / np.sum(g)
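#------------------------------------------------------------------------------
#Hypothetical usage sketch (not part of the original module). The folder and the
#sobject-style filename below are made up; this only illustrates the intended
#call order of the helpers defined above.
#
# import galah_tools as gt
# gt.setup(folder='/data/galah/', download=False)
# s = gt.read('1402070012010123', kind='norm', wavelength='object')
# s.convolve(0.4, extend=True)          #smooth with a 0.4 A FWHM Gaussian
# s.save_ascii('1402070012010123.txt')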
|
sheliak/galah_tools
|
old_versions/galah_tools.py
|
Python
|
gpl-2.0
| 24,308
|
import pytest
import networkx as nx
from networkx.generators.classic import barbell_graph, cycle_graph, path_graph
from networkx.testing.utils import assert_graphs_equal
#numpy = pytest.importorskip("numpy")
class TestConvertNumpy(object):
@classmethod
def setup_class(cls):
global np
global np_assert_equal
try:
import numpy as np
np_assert_equal = np.testing.assert_equal
except ImportError:
pytest.skip('Numpy not available', allow_module_level=True)
def setup_method(self):
self.G1 = barbell_graph(10, 3)
self.G2 = cycle_graph(10, create_using=nx.DiGraph)
self.G3 = self.create_weighted(nx.Graph())
self.G4 = self.create_weighted(nx.DiGraph())
def test_exceptions(self):
G = np.array("a")
pytest.raises(nx.NetworkXError, nx.to_networkx_graph, G)
def create_weighted(self, G):
g = cycle_graph(4)
G.add_nodes_from(g)
G.add_weighted_edges_from((u, v, 10 + u) for u, v in g.edges())
return G
def assert_equal(self, G1, G2):
assert sorted(G1.nodes()) == sorted(G2.nodes())
assert sorted(G1.edges()) == sorted(G2.edges())
def identity_conversion(self, G, A, create_using):
assert(A.sum() > 0)
GG = nx.from_numpy_matrix(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_equal(G, GW)
GI = nx.empty_graph(0, create_using).__class__(A)
self.assert_equal(G, GI)
def test_shape(self):
"Conversion from non-square array."
A = np.array([[1, 2, 3], [4, 5, 6]])
pytest.raises(nx.NetworkXError, nx.from_numpy_matrix, A)
def test_identity_graph_matrix(self):
"Conversion from graph to matrix to graph."
A = nx.to_numpy_matrix(self.G1)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_graph_array(self):
"Conversion from graph to array to graph."
A = nx.to_numpy_matrix(self.G1)
A = np.asarray(A)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_digraph_matrix(self):
"""Conversion from digraph to matrix to digraph."""
A = nx.to_numpy_matrix(self.G2)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_digraph_array(self):
"""Conversion from digraph to array to digraph."""
A = nx.to_numpy_matrix(self.G2)
A = np.asarray(A)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_weighted_graph_matrix(self):
"""Conversion from weighted graph to matrix to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_graph_array(self):
"""Conversion from weighted graph to array to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
A = np.asarray(A)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_digraph_matrix(self):
"""Conversion from weighted digraph to matrix to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_identity_weighted_digraph_array(self):
"""Conversion from weighted digraph to array to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
A = np.asarray(A)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_nodelist(self):
"""Conversion from graph to matrix to graph with nodelist."""
P4 = path_graph(4)
P3 = path_graph(3)
nodelist = list(P3)
A = nx.to_numpy_matrix(P4, nodelist=nodelist)
GA = nx.Graph(A)
self.assert_equal(GA, P3)
# Make nodelist ambiguous by containing duplicates.
nodelist += [nodelist[0]]
pytest.raises(nx.NetworkXError, nx.to_numpy_matrix, P3, nodelist=nodelist)
def test_weight_keyword(self):
WP4 = nx.Graph()
WP4.add_edges_from((n, n + 1, dict(weight=0.5, other=0.3)) for n in range(3))
P4 = path_graph(4)
A = nx.to_numpy_matrix(P4)
np_assert_equal(A, nx.to_numpy_matrix(WP4, weight=None))
np_assert_equal(0.5 * A, nx.to_numpy_matrix(WP4))
np_assert_equal(0.3 * A, nx.to_numpy_matrix(WP4, weight='other'))
def test_from_numpy_matrix_type(self):
A = np.matrix([[1]])
G = nx.from_numpy_matrix(A)
assert type(G[0][0]['weight']) == int
A = np.matrix([[1]]).astype(np.float)
G = nx.from_numpy_matrix(A)
assert type(G[0][0]['weight']) == float
A = np.matrix([[1]]).astype(np.str)
G = nx.from_numpy_matrix(A)
assert type(G[0][0]['weight']) == str
A = np.matrix([[1]]).astype(np.bool)
G = nx.from_numpy_matrix(A)
assert type(G[0][0]['weight']) == bool
A = np.matrix([[1]]).astype(np.complex)
G = nx.from_numpy_matrix(A)
assert type(G[0][0]['weight']) == complex
A = np.matrix([[1]]).astype(np.object)
pytest.raises(TypeError, nx.from_numpy_matrix, A)
G = nx.cycle_graph(3)
A = nx.adj_matrix(G).todense()
H = nx.from_numpy_matrix(A)
assert all(type(m) == int and type(n) == int for m, n in H.edges())
H = nx.from_numpy_array(A)
assert all(type(m) == int and type(n) == int for m, n in H.edges())
def test_from_numpy_matrix_dtype(self):
dt = [('weight', float), ('cost', int)]
A = np.matrix([[(1.0, 2)]], dtype=dt)
G = nx.from_numpy_matrix(A)
assert type(G[0][0]['weight']) == float
assert type(G[0][0]['cost']) == int
assert G[0][0]['cost'] == 2
assert G[0][0]['weight'] == 1.0
def test_to_numpy_recarray(self):
G = nx.Graph()
G.add_edge(1, 2, weight=7.0, cost=5)
A = nx.to_numpy_recarray(G, dtype=[('weight', float), ('cost', int)])
assert sorted(A.dtype.names) == ['cost', 'weight']
assert A.weight[0, 1] == 7.0
assert A.weight[0, 0] == 0.0
assert A.cost[0, 1] == 5
assert A.cost[0, 0] == 0
def test_numpy_multigraph(self):
G = nx.MultiGraph()
G.add_edge(1, 2, weight=7)
G.add_edge(1, 2, weight=70)
A = nx.to_numpy_matrix(G)
assert A[1, 0] == 77
A = nx.to_numpy_matrix(G, multigraph_weight=min)
assert A[1, 0] == 7
A = nx.to_numpy_matrix(G, multigraph_weight=max)
assert A[1, 0] == 70
def test_from_numpy_matrix_parallel_edges(self):
"""Tests that the :func:`networkx.from_numpy_matrix` function
interprets integer weights as the number of parallel edges when
creating a multigraph.
"""
A = np.matrix([[1, 1], [1, 2]])
# First, with a simple graph, each integer entry in the adjacency
# matrix is interpreted as the weight of a single edge in the graph.
expected = nx.DiGraph()
edges = [(0, 0), (0, 1), (1, 0)]
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
expected.add_edge(1, 1, weight=2)
actual = nx.from_numpy_matrix(A, parallel_edges=True,
create_using=nx.DiGraph)
assert_graphs_equal(actual, expected)
actual = nx.from_numpy_matrix(A, parallel_edges=False,
create_using=nx.DiGraph)
assert_graphs_equal(actual, expected)
# Now each integer entry in the adjacency matrix is interpreted as the
# number of parallel edges in the graph if the appropriate keyword
# argument is specified.
edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
expected = nx.MultiDiGraph()
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
actual = nx.from_numpy_matrix(A, parallel_edges=True,
create_using=nx.MultiDiGraph)
assert_graphs_equal(actual, expected)
expected = nx.MultiDiGraph()
expected.add_edges_from(set(edges), weight=1)
# The sole self-loop (edge 0) on vertex 1 should have weight 2.
expected[1][1][0]['weight'] = 2
actual = nx.from_numpy_matrix(A, parallel_edges=False,
create_using=nx.MultiDiGraph)
assert_graphs_equal(actual, expected)
def test_symmetric(self):
"""Tests that a symmetric matrix has edges added only once to an
undirected multigraph when using :func:`networkx.from_numpy_matrix`.
"""
A = np.matrix([[0, 1], [1, 0]])
G = nx.from_numpy_matrix(A, create_using=nx.MultiGraph)
expected = nx.MultiGraph()
expected.add_edge(0, 1, weight=1)
assert_graphs_equal(G, expected)
def test_dtype_int_graph(self):
"""Test that setting dtype int actually gives an integer matrix.
For more information, see GitHub pull request #1363.
"""
G = nx.complete_graph(3)
A = nx.to_numpy_matrix(G, dtype=int)
assert A.dtype == int
def test_dtype_int_multigraph(self):
"""Test that setting dtype int actually gives an integer matrix.
For more information, see GitHub pull request #1363.
"""
G = nx.MultiGraph(nx.complete_graph(3))
A = nx.to_numpy_matrix(G, dtype=int)
assert A.dtype == int
class TestConvertNumpyArray(object):
@classmethod
def setup_class(cls):
global np
global np_assert_equal
np = pytest.importorskip('numpy')
np_assert_equal = np.testing.assert_equal
def setup_method(self):
self.G1 = barbell_graph(10, 3)
self.G2 = cycle_graph(10, create_using=nx.DiGraph)
self.G3 = self.create_weighted(nx.Graph())
self.G4 = self.create_weighted(nx.DiGraph())
def create_weighted(self, G):
g = cycle_graph(4)
G.add_nodes_from(g)
G.add_weighted_edges_from((u, v, 10 + u) for u, v in g.edges())
return G
def assert_equal(self, G1, G2):
assert sorted(G1.nodes()) == sorted(G2.nodes())
assert sorted(G1.edges()) == sorted(G2.edges())
def identity_conversion(self, G, A, create_using):
assert(A.sum() > 0)
GG = nx.from_numpy_array(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_equal(G, GW)
GI = nx.empty_graph(0, create_using).__class__(A)
self.assert_equal(G, GI)
def test_shape(self):
"Conversion from non-square array."
A = np.array([[1, 2, 3], [4, 5, 6]])
pytest.raises(nx.NetworkXError, nx.from_numpy_array, A)
def test_identity_graph_array(self):
"Conversion from graph to array to graph."
A = nx.to_numpy_array(self.G1)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_digraph_array(self):
"""Conversion from digraph to array to digraph."""
A = nx.to_numpy_array(self.G2)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_weighted_graph_array(self):
"""Conversion from weighted graph to array to weighted graph."""
A = nx.to_numpy_array(self.G3)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_digraph_array(self):
"""Conversion from weighted digraph to array to weighted digraph."""
A = nx.to_numpy_array(self.G4)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_nodelist(self):
"""Conversion from graph to array to graph with nodelist."""
P4 = path_graph(4)
P3 = path_graph(3)
nodelist = list(P3)
A = nx.to_numpy_array(P4, nodelist=nodelist)
GA = nx.Graph(A)
self.assert_equal(GA, P3)
# Make nodelist ambiguous by containing duplicates.
nodelist += [nodelist[0]]
pytest.raises(nx.NetworkXError, nx.to_numpy_array, P3, nodelist=nodelist)
def test_weight_keyword(self):
WP4 = nx.Graph()
WP4.add_edges_from((n, n + 1, dict(weight=0.5, other=0.3)) for n in range(3))
P4 = path_graph(4)
A = nx.to_numpy_array(P4)
np_assert_equal(A, nx.to_numpy_array(WP4, weight=None))
np_assert_equal(0.5 * A, nx.to_numpy_array(WP4))
np_assert_equal(0.3 * A, nx.to_numpy_array(WP4, weight='other'))
def test_from_numpy_array_type(self):
A = np.array([[1]])
G = nx.from_numpy_array(A)
assert type(G[0][0]['weight']) == int
A = np.array([[1]]).astype(np.float)
G = nx.from_numpy_array(A)
assert type(G[0][0]['weight']) == float
A = np.array([[1]]).astype(np.str)
G = nx.from_numpy_array(A)
assert type(G[0][0]['weight']) == str
A = np.array([[1]]).astype(np.bool)
G = nx.from_numpy_array(A)
assert type(G[0][0]['weight']) == bool
A = np.array([[1]]).astype(np.complex)
G = nx.from_numpy_array(A)
assert type(G[0][0]['weight']) == complex
A = np.array([[1]]).astype(np.object)
pytest.raises(TypeError, nx.from_numpy_array, A)
def test_from_numpy_array_dtype(self):
dt = [('weight', float), ('cost', int)]
A = np.array([[(1.0, 2)]], dtype=dt)
G = nx.from_numpy_array(A)
assert type(G[0][0]['weight']) == float
assert type(G[0][0]['cost']) == int
assert G[0][0]['cost'] == 2
assert G[0][0]['weight'] == 1.0
def test_to_numpy_recarray(self):
G = nx.Graph()
G.add_edge(1, 2, weight=7.0, cost=5)
A = nx.to_numpy_recarray(G, dtype=[('weight', float), ('cost', int)])
assert sorted(A.dtype.names) == ['cost', 'weight']
assert A.weight[0, 1] == 7.0
assert A.weight[0, 0] == 0.0
assert A.cost[0, 1] == 5
assert A.cost[0, 0] == 0
def test_numpy_multigraph(self):
G = nx.MultiGraph()
G.add_edge(1, 2, weight=7)
G.add_edge(1, 2, weight=70)
A = nx.to_numpy_array(G)
assert A[1, 0] == 77
A = nx.to_numpy_array(G, multigraph_weight=min)
assert A[1, 0] == 7
A = nx.to_numpy_array(G, multigraph_weight=max)
assert A[1, 0] == 70
def test_from_numpy_array_parallel_edges(self):
"""Tests that the :func:`networkx.from_numpy_array` function
interprets integer weights as the number of parallel edges when
creating a multigraph.
"""
A = np.array([[1, 1], [1, 2]])
# First, with a simple graph, each integer entry in the adjacency
# matrix is interpreted as the weight of a single edge in the graph.
expected = nx.DiGraph()
edges = [(0, 0), (0, 1), (1, 0)]
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
expected.add_edge(1, 1, weight=2)
actual = nx.from_numpy_array(A, parallel_edges=True,
create_using=nx.DiGraph)
assert_graphs_equal(actual, expected)
actual = nx.from_numpy_array(A, parallel_edges=False,
create_using=nx.DiGraph)
assert_graphs_equal(actual, expected)
# Now each integer entry in the adjacency matrix is interpreted as the
# number of parallel edges in the graph if the appropriate keyword
# argument is specified.
edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
expected = nx.MultiDiGraph()
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
actual = nx.from_numpy_array(A, parallel_edges=True,
create_using=nx.MultiDiGraph)
assert_graphs_equal(actual, expected)
expected = nx.MultiDiGraph()
expected.add_edges_from(set(edges), weight=1)
# The sole self-loop (edge 0) on vertex 1 should have weight 2.
expected[1][1][0]['weight'] = 2
actual = nx.from_numpy_array(A, parallel_edges=False,
create_using=nx.MultiDiGraph)
assert_graphs_equal(actual, expected)
def test_symmetric(self):
"""Tests that a symmetric array has edges added only once to an
undirected multigraph when using :func:`networkx.from_numpy_array`.
"""
A = np.array([[0, 1], [1, 0]])
G = nx.from_numpy_array(A, create_using=nx.MultiGraph)
expected = nx.MultiGraph()
expected.add_edge(0, 1, weight=1)
assert_graphs_equal(G, expected)
def test_dtype_int_graph(self):
"""Test that setting dtype int actually gives an integer array.
For more information, see GitHub pull request #1363.
"""
G = nx.complete_graph(3)
A = nx.to_numpy_array(G, dtype=int)
assert A.dtype == int
def test_dtype_int_multigraph(self):
"""Test that setting dtype int actually gives an integer array.
For more information, see GitHub pull request #1363.
"""
G = nx.MultiGraph(nx.complete_graph(3))
A = nx.to_numpy_array(G, dtype=int)
assert A.dtype == int
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/tests/test_convert_numpy.py
|
Python
|
mit
| 17,291
|
import argparse
import smtplib
from email import charset
from email.MIMEText import MIMEText
import sys
import socks
import random
from twitter import *
def base64_char(i):
if 0 <= i <= 25: # A-Z
return chr(i + ord('A'))
elif 26 <= i <= 51: # a-z
return chr(i + ord('a') - 26)
elif 52 <= i <= 61: # 0-9
return chr(i + ord('0') - 52)
elif i == 62:
return '+'
else:
return '/'
def fake_pgp_msg():
body = """\
-----BEGIN PGP MESSAGE-----
Version: GnuPG v1.4.12 (GNU/Linux)
"""
for i in xrange(random.randrange(2000,5000)):
body += base64_char(random.randrange(64))
if (i + 1) % 64 == 0:
body += '\n'
body += "==\n="
for i in xrange(0,4):
body += base64_char(random.randrange(64))
body +="\n-----END PGP MESSAGE-----"
return body
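# Illustrative check (not part of the dispatchers below): the generated text should
# look like an ASCII-armoured PGP message.
#
# msg = fake_pgp_msg()
# print msg.splitlines()[0]    # -----BEGIN PGP MESSAGE-----
# print msg.splitlines()[-1]   # -----END PGP MESSAGE-----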
class noise_dispatcher(object):
def __init__(self,argv):
self.config_parser(argv)
def config_parser(self,argv):
if type(argv) == dict:
argd = argv
argv = []
for k,v in argd.iteritems():
argv.append('--' + k)
argv.append(v)
return argv
def dispatch(self,noise):
pass
class twitter_dispatcher(noise_dispatcher):
def config_parser(self,argv):
argv = super(twitter_dispatcher,self).config_parser(argv)
parser = argparse.ArgumentParser(description='This is the NOISE Twitter dispatch program.',prog='noise_tweet.py')
parser.add_argument('-a','--app-name',type=str,required=True,help="Name of the application used to tweet")
parser.add_argument('-k','--consumer-key',type=str,required=True,help="Consumer key specific to the application used to tweet")
parser.add_argument('-c','--consumer-secret',type=str,required=True,help="Consumer secret specific to the application used to tweet")
parser.add_argument('-t','--oauth-token',type=str,help="OAuth token authorizing the user to the application. If missing, you will be prompted to generate one.")
parser.add_argument('-s','--oauth-secret',type=str,help="OAuth secret authorizing the user to the application. If missing, you will be prompted to generate one.")
parser.add_argument('-b','--status',type=str,help="Status/text to tweet")
parser.add_argument('--proxy',type=int,const=9050,nargs='?',help="Require the use of a proxy, optionally specifying the port. Default port: %(const)s.")
self.args = parser.parse_args(argv)
if not self.args.oauth_token and not self.args.oauth_secret:
self.oauth_token, self.oauth_secret = oauth_dance(self.args.app_name,self.args.consumer_key,self.args.consumer_secret)
print """Add the following lines to your NOISE configuration file
(e.g. noise.conf) under the [TwitterDispatch] section:
oauth-token = %s
oauth-secret = %s
""" % (self.oauth_token, self.oauth_secret)
else:
self.oauth_token, self.oauth_secret = self.args.oauth_token, self.args.oauth_secret
def dispatch(self,noise=None):
if noise and len(noise) > 140:
return
if self.args.proxy:
# Let's use a Tor SOCKS proxy, if available. Obviously, start Tor before running this program
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, 'localhost', self.args.proxy)
s = socks.socksocket()
s.connect(('example.com', 80))
s.close()
socks.wrapmodule(twitter)
t = Twitter(
auth=OAuth(self.oauth_token, self.oauth_secret,
self.args.consumer_key, self.args.consumer_secret)
)
t.statuses.update(status = noise if noise else self.args.status)
return "Successfully tweeted"
class email_dispatcher(noise_dispatcher):
def config_parser(self,argv):
argv = super(email_dispatcher,self).config_parser(argv)
parser = argparse.ArgumentParser(description='This is the NOISE Email dispatch program.',prog='noise_dispatch.py')
parser.add_argument('-f','--from',dest='sender',type=str,help="Sender (doubles as username), e.g. myemail@gmail.com", required=True)
parser.add_argument('-t','--to',type=str,help="Recipient email address, e.g. foo@hotmail.com", required=True)
parser.add_argument('-r','--server',type=str,help="Remote SMTP server, e.g. smtp.gmail.com", required=True)
parser.add_argument('-p','--pass',dest='passwd',type=str,help="Account passphrase on remote server", required=True)
parser.add_argument('-s','--subject',type=str,help="Email subject field", required=True)
parser.add_argument('-b','--body',type=str,help="Email body text")
parser.add_argument('-e','--encrypted',const=True,default=False,nargs='?',help="Generate fake encrypted emails instead of generating plaintext")
parser.add_argument('--proxy',type=int,const=9050,nargs='?',help="Require the use of a proxy, optionally specifying the port. Default port: %(const)s.")
self.args = parser.parse_args(argv)
def dispatch(self,noise=None):
charset.add_charset('utf-8', charset.SHORTEST)
if self.args.encrypted and self.args.encrypted.lower() not in ['false','no','0']:
msg = MIMEText(fake_pgp_msg(), _charset='utf-8')
else:
msg = MIMEText(noise if noise else self.args.body, _charset='utf-8')
msg['Subject'] = self.args.subject
msg['From'] = self.args.sender
if ',' in self.args.to:
random.seed()
msg['To'] = random.choice(self.args.to.split(', '))
else:
msg['To'] = self.args.to
if self.args.proxy:
# Let's use a Tor SOCKS proxy, if available. Obviously, start Tor before running this program
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, 'localhost', self.args.proxy)
s = socks.socksocket()
s.connect(('example.com', 80))
s.close()
socks.wrapmodule(smtplib)
# Use STARTTLS for added security
smtpserver = smtplib.SMTP(self.args.server)
smtpserver.starttls()
smtpserver.set_debuglevel(True)
smtpserver.login(self.args.sender,self.args.passwd)
try:
smtpserver.sendmail(self.args.sender, [self.args.to], msg.as_string())
finally:
smtpserver.close()
return "Successfully sent mail"
if __name__ == '__main__':
d = email_dispatcher(sys.argv[1:])
print d.dispatch()
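# Hypothetical Twitter usage (all credentials below are placeholders, not real values):
# t = twitter_dispatcher({'app-name': 'noise', 'consumer-key': 'KEY', 'consumer-secret': 'SECRET',
#                         'oauth-token': 'TOKEN', 'oauth-secret': 'TOKSECRET', 'status': 'hello world'})
# print t.dispatch()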
|
dismantl/NOISE
|
noise_dispatch.py
|
Python
|
gpl-3.0
| 6,128
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import weakref
import pyarrow as pa
@contextlib.contextmanager
def allocate_bytes(pool, nbytes):
"""
Temporarily allocate *nbytes* from the given *pool*.
"""
arr = pa.array([b"x" * nbytes], type=pa.binary(), memory_pool=pool)
# Fetch the values buffer from the varbinary array and release the rest,
# to get the desired allocation amount
buf = arr.buffers()[2]
arr = None
assert len(buf) == nbytes
try:
yield
finally:
buf = None
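# Illustrative usage of the helper above (mirrors the assertion pattern in the tests below):
#
# pool = pa.default_memory_pool()
# before = pool.bytes_allocated()
# with allocate_bytes(pool, 1024):
#     assert pool.bytes_allocated() == before + 1024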
def check_allocated_bytes(pool):
"""
Check allocation stats on *pool*.
"""
allocated_before = pool.bytes_allocated()
max_mem_before = pool.max_memory()
with allocate_bytes(pool, 512):
assert pool.bytes_allocated() == allocated_before + 512
new_max_memory = pool.max_memory()
assert pool.max_memory() >= max_mem_before
assert pool.bytes_allocated() == allocated_before
assert pool.max_memory() == new_max_memory
def test_default_allocated_bytes():
pool = pa.default_memory_pool()
with allocate_bytes(pool, 1024):
check_allocated_bytes(pool)
assert pool.bytes_allocated() == pa.total_allocated_bytes()
def test_proxy_memory_pool():
pool = pa.proxy_memory_pool(pa.default_memory_pool())
check_allocated_bytes(pool)
wr = weakref.ref(pool)
assert wr() is not None
del pool
assert wr() is None
def test_logging_memory_pool(capfd):
pool = pa.logging_memory_pool(pa.default_memory_pool())
check_allocated_bytes(pool)
out, err = capfd.readouterr()
assert err == ""
assert out.count("Allocate:") > 0
assert out.count("Allocate:") == out.count("Free:")
def test_set_memory_pool():
old_pool = pa.default_memory_pool()
pool = pa.proxy_memory_pool(old_pool)
pa.set_memory_pool(pool)
try:
allocated_before = pool.bytes_allocated()
with allocate_bytes(None, 512):
assert pool.bytes_allocated() == allocated_before + 512
assert pool.bytes_allocated() == allocated_before
finally:
pa.set_memory_pool(old_pool)
|
xhochy/arrow
|
python/pyarrow/tests/test_memory.py
|
Python
|
apache-2.0
| 2,890
|
"""
Unit tests for django-registration.
These tests assume that you've completed all the prerequisites for
getting django-registration running in the default setup, to wit:
1. You have ``registration`` in your ``INSTALLED_APPS`` setting.
2. You have created all of the templates mentioned in this
application's documentation.
3. You have added the setting ``ACCOUNT_ACTIVATION_DAYS`` to your
settings file.
4. You have URL patterns pointing to the registration and activation
views, with the names ``registration_register`` and
``registration_activate``, respectively, and a URL pattern named
'registration_complete'.
"""
import datetime
import sha
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core import management
from django.core.urlresolvers import reverse
from django.test import TestCase
from commoner.registration import forms
from commoner.registration.models import RegistrationProfile
from commoner.registration import signals
class RegistrationTestCase(TestCase):
"""
Base class for the test cases; this sets up two users -- one
expired, one not -- which are used to exercise various parts
of the application.
"""
def setUp(self):
self.sample_user = RegistrationProfile.objects.create_inactive_user(username='alice',
password='secret',
email='alice@example.com')
self.expired_user = RegistrationProfile.objects.create_inactive_user(username='bob',
password='swordfish',
email='bob@example.com')
self.expired_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
self.expired_user.save()
class RegistrationModelTests(RegistrationTestCase):
"""
Tests for the model-oriented functionality of django-registration,
including ``RegistrationProfile`` and its custom manager.
"""
def test_new_user_is_inactive(self):
"""
Test that a newly-created user is inactive.
"""
self.failIf(self.sample_user.is_active)
def test_registration_profile_created(self):
"""
Test that a ``RegistrationProfile`` is created for a new user.
"""
self.assertEqual(RegistrationProfile.objects.count(), 2)
def test_activation_email(self):
"""
Test that user signup sends an activation email.
"""
self.assertEqual(len(mail.outbox), 2)
def test_activation_email_disable(self):
"""
Test that activation email can be disabled.
"""
RegistrationProfile.objects.create_inactive_user(username='noemail',
password='foo',
email='nobody@example.com',
send_email=False)
self.assertEqual(len(mail.outbox), 2)
def test_activation(self):
"""
Test that user activation actually activates the user and
properly resets the activation key, and fails for an
already-active or expired user, or an invalid key.
"""
# Activating a valid user returns the user.
self.failUnlessEqual(RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.sample_user).activation_key).pk,
self.sample_user.pk)
# The activated user must now be active.
self.failUnless(User.objects.get(pk=self.sample_user.pk).is_active)
# The activation key must now be reset to the "already activated" constant.
self.failUnlessEqual(RegistrationProfile.objects.get(user=self.sample_user).activation_key,
RegistrationProfile.ACTIVATED)
# Activating an expired user returns False.
self.failIf(RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.expired_user).activation_key))
# Activating from a key that isn't a SHA1 hash returns False.
self.failIf(RegistrationProfile.objects.activate_user('foo'))
# Activating from a key that doesn't exist returns False.
self.failIf(RegistrationProfile.objects.activate_user(sha.new('foo').hexdigest()))
def test_account_expiration_condition(self):
"""
Test that ``RegistrationProfile.activation_key_expired()``
returns ``True`` for expired users and for active users, and
``False`` otherwise.
"""
# Unexpired user returns False.
self.failIf(RegistrationProfile.objects.get(user=self.sample_user).activation_key_expired())
# Expired user returns True.
self.failUnless(RegistrationProfile.objects.get(user=self.expired_user).activation_key_expired())
# Activated user returns True.
RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user=self.sample_user).activation_key)
self.failUnless(RegistrationProfile.objects.get(user=self.sample_user).activation_key_expired())
def test_expired_user_deletion(self):
"""
Test that
``RegistrationProfile.objects.delete_expired_users()`` deletes
only inactive users whose activation window has expired.
"""
RegistrationProfile.objects.delete_expired_users()
self.assertEqual(RegistrationProfile.objects.count(), 1)
def test_management_command(self):
"""
Test that ``manage.py cleanupregistration`` functions
correctly.
"""
management.call_command('cleanupregistration')
self.assertEqual(RegistrationProfile.objects.count(), 1)
def test_signals(self):
"""
Test that the ``user_registered`` and ``user_activated``
signals are sent, and that they send the ``User`` as an
argument.
"""
def receiver(sender, **kwargs):
self.assert_('user' in kwargs)
self.assertEqual(kwargs['user'].username, u'signal_test')
received_signals.append(kwargs.get('signal'))
received_signals = []
expected_signals = [signals.user_registered, signals.user_activated]
for signal in expected_signals:
signal.connect(receiver)
RegistrationProfile.objects.create_inactive_user(username='signal_test',
password='foo',
email='nobody@example.com',
send_email=False)
RegistrationProfile.objects.activate_user(RegistrationProfile.objects.get(user__username='signal_test').activation_key)
self.assertEqual(received_signals, expected_signals)
class RegistrationFormTests(RegistrationTestCase):
"""
Tests for the forms and custom validation logic included in
django-registration.
"""
fixtures = ['test_codes.json',]
def test_registration_form(self):
"""
Test that ``RegistrationForm`` enforces username constraints
and matching passwords.
"""
invalid_data_dicts = [
# Non-alphanumeric username.
{
'data':
{ 'username': 'foo/bar',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos': 'on',},
'error':
('username', [u"Enter a valid value."])
},
# Already-existing username.
{
'data':
{ 'username': 'alice',
'email': 'alice@example.com',
'password1': 'secret',
'password2': 'secret',
'agree_to_tos': 'on', },
'error':
('username', [u"This username is already taken. Please choose another."])
},
# Mismatched passwords.
{
'data':
{ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'bar',
'agree_to_tos': 'on', },
'error':
('__all__', [u"You must type the same password each time"])
},
# Must agree to TOS
{
'data':
{ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos': False, },
'error':
('agree_to_tos', [u"You must agree to the terms to register"])
},
]
for invalid_dict in invalid_data_dicts:
form = forms.RegistrationForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]], invalid_dict['error'][1])
form = forms.RegistrationForm(data={ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos':'on',
'promo_code':'12345678'})
self.failUnless(form.is_valid())
class RegistrationViewTests(RegistrationTestCase):
"""
Tests for the views included in django-registration.
"""
def _test_registration_view(self):
"""
Underscored to prevent running while free accounts are prohibited
Test that the registration view rejects invalid submissions,
and creates a new user and redirects after a valid submission.
"""
# Invalid data fails.
response = self.client.post(reverse('registration_register'),
data={ 'username': 'alice', # Will fail on username uniqueness.
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo' })
self.assertEqual(response.status_code, 200)
self.failUnless(response.context[0]['form'])
self.failUnless(response.context[0]['form'].errors)
response = self.client.post(reverse('registration_register'),
data={ 'username': 'foo',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'agree_to_tos':'on'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/check_inbox.html')
self.assertEqual(RegistrationProfile.objects.count(), 3)
def test_activation_view(self):
"""
Test that the activation view activates the user from a valid
key and fails if the key is invalid or has expired.
"""
# Valid user puts the user account into the context.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': RegistrationProfile.objects.get(user=self.sample_user).activation_key }))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[0]['account'].pk, self.sample_user.pk)
# Expired user sets the account to False.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': RegistrationProfile.objects.get(user=self.expired_user).activation_key }))
self.assertEqual(response.status_code, 404)
# Invalid key gets to the view, but sets account to False.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': 'foo' }))
# hmmm, need an assertion here
# Nonexistent key sets the account to False.
response = self.client.get(reverse('registration_activate',
kwargs={ 'activation_key': sha.new('foo').hexdigest() }))
self.assertEqual(response.status_code, 404)
|
cc-archive/commoner
|
src/commoner/registration/tests.py
|
Python
|
agpl-3.0
| 12,845
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_name(name: str) -> str:
# Sample function parameter name in get_model_evaluation_image_object_detection_sample
name = name
return name
|
googleapis/python-aiplatform
|
.sample_configs/param_handlers/get_model_evaluation_image_object_detection_sample.py
|
Python
|
apache-2.0
| 735
|
# Definitions file for server API variables/strings.
# Make calls to these variables in code instead of explicit definitions.
class client_api:
data_separator = "|"
# Client message codes
login_code = "login"
register_code = "register"
upload_code = "upload"
retrieve_code = "retrieve"
search_code = "search"
delete_code = "delete"
# Server message codes
login_status_code = "login_status"
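# Hypothetical example of composing a client login message from these codes
# (username and password are placeholders, not defined in this file):
# message = client_api.data_separator.join([client_api.login_code, username, password])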
|
operant/knowledge-management
|
Server/server_c.py
|
Python
|
mit
| 577
|
import configparser
import cv2
import numpy as np
from Image.Cannify import Cannify
from Image.IsolateDigits import IsolateDigits
from Image.Straighten import Straighten
class Pipeline:
def __init__(self, filename: str):
self.file = filename
self.img = cv2.imread(self.file, 0)
self.height, self.width = self.img.shape
""" :var Canny """
self.cannify = None
config = configparser.ConfigParser()
config.read('config.ini')
self.config: map = config['Pipeline']
self.debug: bool = bool(self.config['debug'])
self.sample_size = eval(self.config['sample_size'])
print('sample_size', type(self.sample_size), self.sample_size)
def process(self):
if self.debug:
cv2.imwrite('1-original.png', self.img)
edges = cv2.Canny(self.img,
int(self.config['canny.threshold1']),
int(self.config['canny.threshold2']))
if self.debug:
cv2.imwrite('2-edges.png', edges)
straighten = Straighten(edges, debug=self.debug)
straight = straighten.process()
if self.debug:
cv2.imwrite('4-straight.png', straight)
self.cannify = Cannify(straight, debug=self.debug)
contimage = self.cannify.process()
contours = self.cannify.getDigits()
isolated = np.zeros((self.height, self.width, 3), np.uint8)
cv2.drawContours(isolated, contours, contourIdx=-1, color=(255, 255, 255))
# thickness=cv2.FILLED)
if self.debug:
cv2.imwrite('7-isolated.png', isolated)
# alternatively fill the contours
# todo: this does not leave openings (holes inside digits would be filled)
# for c in contours:
# cv2.fillPoly(isolated, pts=[c], color=(255, 255, 255))
isolator = IsolateDigits(isolated)
digits = isolator.isolate(contours)
return straight, edges, contimage, isolated, digits
def resizeReshape(self, digits):
"""
Reformat each digit from a 2D image with 3 color channels
into a single row of pixels for machine learning
(x*y features, one grayscale value per pixel)
@param digits:
@return:
"""
print('original shape', len(digits), digits[0].shape)
dimensions = self.sample_size[0] * self.sample_size[1]
samples = np.zeros((0, dimensions))
for d in digits:
d30 = cv2.resize(d, self.sample_size, interpolation=cv2.INTER_LANCZOS4)
gray = cv2.cvtColor(d30, cv2.COLOR_BGR2GRAY)
features = np.reshape(gray, dimensions)
samples = np.append(samples, [features], 0)
print('resulting shape', len(samples), samples[0].shape)
return samples
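# Hypothetical usage sketch (the file name is made up; a config.ini with a [Pipeline]
# section is required by __init__):
# p = Pipeline('meter_photo.png')
# straight, edges, contimage, isolated, digits = p.process()
# samples = p.resizeReshape(digits)   # one flattened grayscale row per isolated digit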
|
spidgorny/energy-monitor
|
python/Image/Pipeline.py
|
Python
|
unlicense
| 2,753
|
"""
Given a binary tree, return the bottom-up level order traversal of its nodes' values.
(ie, from left to right, level by level from leaf to root).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if root is None:
return []
ret = []
queue = [root]
count_of_one_level = 1
while queue:
tmp = []
count_of_next_level = 0
while count_of_one_level:
node = queue.pop(0)
tmp.append(node.val)
if node.left:
queue.append(node.left)
count_of_next_level += 1
if node.right:
queue.append(node.right)
count_of_next_level += 1
count_of_one_level -= 1
count_of_one_level = count_of_next_level
ret.append(tmp)
return ret[::-1]
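# Illustrative check (assumes a TreeNode class like the one sketched in the comment above):
# root = TreeNode(3)
# root.left = TreeNode(9)
# root.right = TreeNode(20)
# root.right.left = TreeNode(15)
# root.right.right = TreeNode(7)
# print(Solution().levelOrderBottom(root))   # [[15, 7], [9, 20], [3]]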
|
danielsunzhongyuan/my_leetcode_in_python
|
binary_tree_level_order_traversal_ii_107.py
|
Python
|
apache-2.0
| 1,341
|
import os, sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../")
import bz2
from pymcda.types import *
from pymcda.generate import generate_criteria
from pymcda.generate import generate_categories
from pymcda.generate import generate_categories_profiles
from pymcda.electre_tri import MRSort
from pymcda.learning.mip_mrsort_weights import MipMRSortWeights
from pymcda.learning.mip_mrsort_mobius import MipMRSortMobius
from pymcda.utils import print_pt_and_assignments
from coalition_additive_weights import generate_binary_performance_table_and_assignments
f = bz2.BZ2File(sys.argv[1])
tree = ElementTree.parse(f)
root = tree.getroot()
xmcda_criteria = root.find(".//criteria")
criteria = Criteria().from_xmcda(xmcda_criteria)
xmcda_csets = root.findall(".//criteriaSets")
f.close()
## Weights
w = CriteriaValues()
for c in criteria:
w.append(CriterionValue(c.id, 0.2))
## Profiles and categories
bp1 = AlternativePerformances('b1', {c.id: 0.5 for c in criteria})
bpt = PerformanceTable([bp1])
cat = generate_categories(2, names = ['good', 'bad'])
cps = generate_categories_profiles(cat)
## Model
model = MRSort(criteria, w, bpt, 0.6, cps)
fmins = []
results = []
for i, xmcda in enumerate(xmcda_csets):
result = {}
fmins = CriteriaSets().from_xmcda(xmcda)
result['fmins'] = fmins
result['vector'] = "".join(map(str, sorted([len(fmin)
for fmin in sorted(fmins, key = len)])))
print("\n%d. Fmin: %s" % (i + 1, ', '.join("%s" % f for f in fmins)))
pt, aa = generate_binary_performance_table_and_assignments(criteria, cat,
fmins)
aa.id = 'aa'
a = Alternatives([Alternative(a.id) for a in aa])
model = MRSort(criteria, None, bpt, None, cps)
mip = MipMRSortWeights(model, pt, aa)
obj = mip.solve()
aa2 = model.pessimist(pt)
aa2.id = 'aa_add'
print("MipMRSortWeights: Objective: %d (/%d)" % (obj, len(aa)))
anok = [a.id for a in aa if a.category_id != aa2[a.id].category_id]
print("Alternative not restored: %s" % ','.join("%s" % a for a in anok))
print(model.cv)
print("lambda: %s" % model.lbda)
result['obj_weights'] = obj
mip = MipMRSortMobius(model, pt, aa)
obj = mip.solve()
aa3 = model.pessimist(pt)
aa3.id = 'aa_capa'
print("MipMRSortMobius: Objective: %d (/%d)" % (obj, len(aa)))
anok = [a.id for a in aa if a.category_id != aa3[a.id].category_id]
print("Alternative not restored: %s" % ','.join("%s" % a for a in anok))
print(model.cv)
print("lambda: %s" % model.lbda)
result['obj_capa'] = obj
a = Alternatives([Alternative(a.id) for a in aa])
print_pt_and_assignments(a.keys(), criteria.keys(), [aa, aa2, aa3], pt)
results.append(result)
results.sort(key = lambda x: x['vector'])
results.sort(key = lambda x: len(x['vector']))
maxlen = max([len(', '.join("%s" % f for f in result['fmins']))
for result in results])
print("\n%*s obj_weights obj_capa" % (maxlen, "Fmin"))
for result in results:
print("%*s %*s %*s" % (maxlen, ', '.join("%s" % f
for f in sorted(result['fmins'], key = len)),
len('obj_weights'), result['obj_weights'],
len('obj_capa'), result['obj_capa']))
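# Hedged illustration of the 'vector' key built above: it concatenates the
# sizes of the minimal coalitions (fmins), sorted by length. For instance a
# singleton plus a pair of criteria would give the string "12":
#
# toy_fmins = [frozenset(['c1']), frozenset(['c2', 'c3'])]
# print("".join(map(str, sorted(len(fmin) for fmin in toy_fmins))))  # "12"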
|
oso/pymcda
|
apps/test_coalition_additive_weights.py
|
Python
|
gpl-3.0
| 3,364
|
__all__ = ["bumbledriver", "dummydriver", "printcoredriver", "s3gdriver"]
|
qharley/BotQueue
|
bumblebee/drivers/__init__.py
|
Python
|
gpl-3.0
| 73
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipify_facts
short_description: Retrieve the public IP of your internet gateway.
description:
- Useful if you are behind NAT and need to know the public IP of your internet gateway.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
api_url:
description:
- URL of the ipify.org API service.
- C(?format=json) will be appended by default.
required: false
default: 'https://api.ipify.org'
timeout:
description:
- HTTP connection timeout in seconds.
required: false
default: 10
version_added: "2.3"
validate_certs:
description:
- When set to C(NO), SSL certificates will not be validated.
required: false
default: "yes"
version_added: "2.4"
notes:
- "Visit https://www.ipify.org to get more information."
'''
EXAMPLES = '''
# Gather IP facts from ipify.org
- name: get my public IP
ipify_facts:
# Gather IP facts from your own ipify service endpoint with a custom timeout
- name: get my public IP
ipify_facts:
api_url: http://api.example.com/ipify
timeout: 20
'''
RETURN = '''
---
ipify_public_ip:
description: Public IP of the internet gateway.
returned: success
type: string
sample: 1.2.3.4
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
class IpifyFacts(object):
def __init__(self):
self.api_url = module.params.get('api_url')
self.timeout = module.params.get('timeout')
def run(self):
result = {
'ipify_public_ip': None
}
(response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
if not response:
module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
data = json.loads(to_text(response.read()))
result['ipify_public_ip'] = data.get('ip')
return result
def main():
global module
module = AnsibleModule(
argument_spec=dict(
api_url=dict(default='https://api.ipify.org/'),
timeout=dict(type='int', default=10),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
ipify_facts = IpifyFacts().run()
ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
module.exit_json(**ipify_facts_result)
if __name__ == '__main__':
main()
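# Hedged standalone sketch of what this module does under the hood: one GET
# request to the ipify API with ?format=json appended, reading the 'ip' key.
# It uses the Python 3 standard library instead of Ansible's fetch_url.
#
# import json
# from urllib.request import urlopen
# with urlopen('https://api.ipify.org?format=json', timeout=10) as response:
#     print(json.load(response)['ip'])  # e.g. 1.2.3.4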
|
fernandezcuesta/ansible
|
lib/ansible/modules/net_tools/ipify_facts.py
|
Python
|
gpl-3.0
| 2,944
|
def makeTextFile():
import os
ls = os.linesep
# get filename
while True:
fname = raw_input('Enter file name: ')
if os.path.exists(fname):
print"*** ERROR: '%s' already exists" % fname
else:
break
# get file content (text) lines
all = []
print "\nEnter lines ('.' by itself to quit).\n"
# loop until user terminates input
while True:
entry = raw_input('> ')
if entry == '.':
break
else:
all.append(entry)
# write lines to file with proper line-ending
fobj = open(fname, 'w')
fobj.writelines(['%s%s' % (x, ls) for x in all])
fobj.close()
print 'DONE!'
call()
def readTextFile():
# get filename
fname = raw_input('Enter file name: ')
print
# attempt to open file for reading
try:
fobj = open(fname, 'r')
except IOError, e:
print"*** file open error:", e
else:
# display contents to the screen
for eachLine in fobj:
print eachLine,
fobj.close()
call()
def call():
t=raw_input("Input 'm' for make,'r' for read:")
if t=='r':
readTextFile()
elif t=='m':
makeTextFile()
else:
t=raw_input("Wrong answer!Input 'm' for make,'r' for read:")
call()
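# Hedged Python 3 sketch of the same write-lines-with-separator idea (this file
# itself is Python 2: print statements and raw_input). 'demo.txt' is only a
# placeholder name for illustration.
#
# import os
# lines = ['first line', 'second line']
# with open('demo.txt', 'w') as fobj:
#     fobj.writelines('%s%s' % (line, os.linesep) for line in lines)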
|
github641/python-journey
|
summarry read and make text.py
|
Python
|
bsd-2-clause
| 1,404
|
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from pybuilder.core import use_plugin, task, init, description
use_plugin('python.core')
@init
def init_pytddmon_plugin(project):
project.build_depends_on('pytddmon', '>=1.0.2')
@task
@description('Start monitoring tests.')
def pytddmon(project, logger):
import os
unittest_directory = project.get_property('dir_source_unittest_python')
environment = os.environ.copy()
python_path_relative_to_basedir = project.get_property('dir_source_main_python')
absolute_python_path = os.path.join(project.basedir, python_path_relative_to_basedir)
environment['PYTHONPATH'] = absolute_python_path
# necessary because of windows newlines in the pytddmon shebang - must fix upstream first
python_interpreter = subprocess.check_output('which python', shell=True).rstrip('\n')
pytddmon_script = subprocess.check_output('which pytddmon.py', shell=True).rstrip('\n')
subprocess.Popen([python_interpreter, pytddmon_script, '--no-pulse'], shell=False, cwd=unittest_directory, env=environment)
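# Hedged alternative sketch: on Python 3.3+ the external 'which' calls above
# could be replaced with the standard library, avoiding shell=True entirely.
# This is only an illustration, not the plugin's actual implementation.
#
# import shutil
# python_interpreter = shutil.which('python')
# pytddmon_script = shutil.which('pytddmon.py')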
|
shakamunyi/pybuilder
|
src/main/python/pybuilder/plugins/python/pytddmon_plugin.py
|
Python
|
apache-2.0
| 1,683
|
##
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
import sys
import os
import json
import urllib2
import random
class EurekaClient(object):
# Users can either set the EUREKA_URL environment variable
# or pass it as a parameter (which takes precedence)
def __init__(self, eurekaUrl=None):
self._eurekaUrl = eurekaUrl
if self._eurekaUrl is None or len(self._eurekaUrl) == 0:
self._eurekaUrl = os.getenv('EUREKA_URL')
# If appName is not set, then use NETFLIX_APP env, and finally default to 'genie'
def _getInstances(self, appName=None, status='UP'):
# if appName is not set, try to infer it from the environment - else default to 'genie'
if appName is None or len(appName) == 0:
appName = os.getenv("NETFLIX_APP")
if appName is None or len(appName) == 0:
appName = 'genie'
# ensure that we can find Eureka
if self._eurekaUrl is None or len(self._eurekaUrl) == 0:
raise RuntimeError("EUREKA_URL is not provided via env or constructor")
# get the response from Eureka
restUrl = self._eurekaUrl + '/' + appName
req = urllib2.Request(url=restUrl)
req.add_header('Accept', 'application/json')
assert req.get_method() == 'GET'
response = urllib2.urlopen(req)
# parse the json response
json_str = response.read()
json_obj = json.loads(json_str)
# get a list of all instances
instances = json_obj['application']['instance']
# check to see if this is a list or a singleton
isList = isinstance(instances, list)
# filter out the instances as specified by status
instancesList = []
if (isList):
for i in instances:
if i['status'] == status:
instancesList.append(i)
else:
# singleton instance
if instances['status'] == status:
instancesList.append(instances)
# ensure that we have at least 1 instance that is UP
assert len(instancesList) > 0, \
"No " + appName + " instances found that were " + status + " on " + self._eurekaUrl
# return each one
return instancesList
# If the SERVICE_BASE_URL environment variable is set, return it - e.g. http://localhost:7001
# Else use Eureka to find an instance that is UP
def getServiceBaseUrl(self, appName=None):
service_url = os.getenv('SERVICE_BASE_URL')
if service_url is not None and len(service_url) != 0:
print "Returning SERVICE_BASE_URL provided by environment variable:", service_url
print
return service_url
else:
print "Getting UP instance from Eureka: " + self._eurekaUrl
print
instancesUp = self._getInstances(appName, status='UP')
# pick a random one
instance = instancesUp[random.randrange(0, len(instancesUp), 1)]
service_url = 'http://' + instance['hostName'] + ':' + instance['port']['$']
return service_url
# Use Eureka to find instances that are OUT_OF_SERVICE
def getOOSInstances(self, appName=None):
return self._getInstances(appName, status='OUT_OF_SERVICE')
# Use Eureka to find instances that are UP
def getUPInstances(self, appName=None):
return self._getInstances(appName, status='UP')
if __name__ == "__main__":
client = EurekaClient()
print "Getting base URL for Genie Service from Eureka:"
print client.getServiceBaseUrl()
print
print "Getting list of all Genie OOS instances"
print client.getOOSInstances()
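# Hedged usage sketch with an explicit Eureka URL instead of the EUREKA_URL
# environment variable; the URL below is a placeholder, not a real endpoint.
#
# client = EurekaClient(eurekaUrl='http://eureka.example.com:7001/eureka/v2/apps')
# print client.getServiceBaseUrl('genie')
# print client.getUPInstances('genie')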
|
korrelate/genie
|
genie-web/src/test/python/utils/eureka.py
|
Python
|
apache-2.0
| 4,342
|
from .performance import Performance
from .junction import *
def compare(args):
if not args.multiclass:
rp = set()
rn = set()
ref_entries = 0
if not args.labels:
rp, ref_entries = Junction.createJuncSet(args.reference[0], use_strand=args.use_strand)
else:
rp, rn, ref_entries = Junction.createMarkedupJuncSets(args.reference[0], use_strand=args.use_strand)
print()
print("Reference:")
print(" - # total junctions:", ref_entries)
if args.labels:
print(" - # distinct positive junctions:", len(rp))
print(" - # distinct negative junctions:", len(rn))
else:
print(" - # distinct junctions:", len(rp))
print()
# Load all junction files
print("\t".join(
["File", "distinct", "total", Performance.longHeader() if args.labels else Performance.shortHeader()]))
recall = 0
precision = 0
f1 = 0
for f in args.input:
junc_set, bed_entries = Junction.createJuncSet(f, use_strand=args.use_strand)
# Build table
tab = list()
p = Performance()
if args.labels:
p.tp = len(junc_set & rp)
p.fp = len(junc_set & rn)
p.fn = len(rp - junc_set)
p.tn = len(rn - junc_set)
else:
p.tp = len(rp & junc_set)
p.fp = len(junc_set - rp)
p.fn = len(rp - junc_set)
print("\t".join([f, str(len(junc_set)), str(bed_entries), p.longStr() if args.labels else str(p)]))
recall += p.recall()
precision += p.precision()
f1 += p.F1()
if len(args.input) > 1:
print()
print("Mean recall: ", format(recall / len(args.input), '.2f'))
print("Mean precision: ", format(precision / len(args.input), '.2f'))
print("Mean f1: ", format(f1 / len(args.input), '.2f'))
else:
ref_set, ref_entries = Junction.createJuncSet(args.reference[0], use_strand=args.use_strand)
ref_ss = Junction.createSpliceSiteSet(args.reference[0], use_strand=args.use_strand)
print()
print("Reference:")
print(" - # distinct junctions:", len(ref_set))
print(" - # total junctions:", ref_entries)
print(" - # distinct splice sites:", len(ref_ss))
print()
# Load all bed files
print("Result legend:")
print("Class 1 = Intron in ref")
print("Class 2 = Both splice sites in ref")
print("Class 3 = Only 1 splice site in ref")
print("Class 4 = Novel")
print()
print("\t".join(["file", "class1", "class2", "class3", "class4"]))
for jf in args.input:
juncs, entries = Junction.createDict(jf, use_strand=args.use_strand, fullparse=True)
class1 = 0
class2 = 0
class3 = 0
class4 = 0
for key, value in juncs.items():
key1 = value.startSplicesiteKey()
key2 = value.endSplicesiteKey()
if value.key in ref_set:
class1 += 1
elif key1 in ref_ss and key2 in ref_ss:
class2 += 1
elif key1 in ref_ss or key2 in ref_ss:
class3 += 1
else:
class4 += 1
print("\t".join([jf, str(class1), str(class2), str(class3), str(class4)]))
def add_options(parser):
parser.add_argument("reference", nargs=1, help="The junction file to treat as the reference")
parser.add_argument("input", nargs="+", help="One or more junction files to compare against the reference")
parser.add_argument("-s", "--use_strand", action='store_true', default=False,
help="Whether to use strand information when building keys")
parser.add_argument("-l", "--labels",
help="Path to a file containing labels for the reference indicating whether or not each reference junction is genuine (as generated using the markup tool). If provided this script produces a much richer performance analysis. Not compatible with '--multiclass'")
parser.add_argument("-m", "--multiclass", action='store_true', default=False,
help="""Breakdown results into multiple classes:
1) Matching intron
2) Two matching splice sites but no matching intron (i.e. splice sites from different introns)
3) One matching splice site
4) No matching splice sites""")
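# Hedged reference sketch of the standard definitions behind the metrics summed
# in compare() above; the Performance class may differ in edge-case handling.
#
# tp, fp, fn = 80, 10, 20
# precision = tp / (tp + fp)                           # ~0.89
# recall = tp / (tp + fn)                              # 0.80
# f1 = 2 * precision * recall / (precision + recall)   # ~0.84
# print(format(precision, '.2f'), format(recall, '.2f'), format(f1, '.2f'))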
|
maplesond/portcullis
|
scripts/junctools/junctools/compare.py
|
Python
|
gpl-3.0
| 3,895
|
'''
Copyright (C) 2005-17 www.interpss.org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from datetime import datetime
import tensorflow as tf
import numpy as np
import sys
sys.path.insert(0, '..')
import lib.common_func as cf
train_points = 50
#
# load the IEEE-14Bus case
#
filename = 'c:/temp/temp/ieee14-1.ieee'
busIdMappingFilename = 'c:/temp/temp/ieee14_busid2no.mapping'
branchIdMappingFilename = 'c:/temp/temp/ieee14_branchid2no.mapping'
intAry = cf.ipss_app.loadCase(filename, 'BranchPLoadChangeTrainCaseBuilder', busIdMappingFilename, branchIdMappingFilename)
noBus, noBranch = intAry
print(filename, ' loaded, no of Buses, Branches:', noBus, ', ', noBranch)
# define model size
size = noBus * 2
#print('size: ', size)
# define model variables
W1 = tf.Variable(tf.zeros([size,noBranch]))
b1 = tf.Variable(tf.zeros([noBranch]))
init = tf.initialize_all_variables()
# define model
def nn_model(data):
output = tf.matmul(data, W1) + b1
return output
# define loss
x = tf.placeholder(tf.float32, [None, size])
y = tf.placeholder(tf.float32)
error = tf.square(nn_model(x) - y)
loss = tf.reduce_sum(error)
# define training optimization
optimizer = tf.train.GradientDescentOptimizer(cf.learning_rate)
train = optimizer.minimize(loss)
# run the computation graph
with tf.Session() as sess :
sess.run(init)
# run the training part
# =====================
print('Begin training: ', datetime.now())
# retrieve training set
trainSet = cf.ipss_app.getTrainSet(train_points)
train_x, train_y = cf.transfer2PyArrays(trainSet)
#print2DArray(train_x, 'train_xSet', 'train_x')
#print2DArray(train_y, 'train_ySet', 'train_y')
# run the training part
for i in range(cf.train_steps):
if (i % 1000 == 0) : print('Training step: ', i)
sess.run(train, {x:train_x, y:train_y})
print('End training: ', datetime.now())
#print('W1: ', sess.run(W1))
#print('b1: ', sess.run(b1))
# run the verification part
# =========================
# retrieve a test case
testCase = cf.ipss_app.getTestCase()
test_x, test_y = cf.transfer2PyArrays(testCase)
#printArray(test_x, 'test_x')
#printArray(test_y, 'test_y')
# compute model output (network voltage)
model_y = sess.run(nn_model(x), {x:test_x})
#printArray(model_y[0], 'model_y')
print('max error: ', np.sqrt(np.max(np.abs(np.square(model_y - test_y)))))
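# Hedged sanity-check sketch: the same linear mapping y ~ x.W1 + b1 solved in
# closed form with numpy least squares on synthetic data. Shapes are only
# illustrative; the real size/noBranch come from the loaded case.
#
# import numpy as np
# size, noBranch, n = 28, 20, 50
# X = np.random.rand(n, size)
# W_true = np.random.rand(size, noBranch)
# Y = X.dot(W_true)
# X_aug = np.hstack([X, np.ones((n, 1))])             # absorb the bias term
# coef, _, _, _ = np.linalg.lstsq(X_aug, Y, rcond=None)
# W_fit, b_fit = coef[:-1], coef[-1]
# print(np.max(np.abs(X.dot(W_fit) + b_fit - Y)))     # ~0 on noise-free data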
|
interpss/DeepMachineLearning
|
lfGenNPred/py/single_net_mapping/predict_branchp2.py
|
Python
|
apache-2.0
| 3,105
|
"""
Test everything related to contents
"""
from gabbletest import sync_stream
from servicetest import (
make_channel_proxy, assertEquals, EventPattern)
import constants as cs
from jingletest2 import (
JingleTest2, JingleProtocol015, JingleProtocol031, test_dialects)
from twisted.words.xish import xpath
def worker(jp, q, bus, conn, stream):
def make_stream_request(stream_type):
media_iface.RequestStreams(remote_handle, [stream_type])
e = q.expect('dbus-signal', signal='NewStreamHandler')
stream_id = e.args[1]
stream_handler = make_channel_proxy(conn, e.args[0], 'Media.StreamHandler')
stream_handler.NewNativeCandidate("fake", jt2.get_remote_transports_dbus())
stream_handler.Ready(jt2.get_audio_codecs_dbus())
stream_handler.StreamState(cs.MEDIA_STREAM_STATE_CONNECTED)
return (stream_handler, stream_id)
jt2 = JingleTest2(jp, conn, q, stream, 'test@localhost', 'foo@bar.com/Foo')
jt2.prepare()
self_handle = conn.GetSelfHandle()
remote_handle = conn.RequestHandles(cs.HT_CONTACT, ["foo@bar.com/Foo"])[0]
# Remote end calls us
jt2.incoming_call()
# FIXME: these signals are not observable by real clients, since they
# happen before NewChannels.
# The caller is in members
e = q.expect('dbus-signal', signal='MembersChanged',
args=[u'', [remote_handle], [], [], [], 0, 0])
# We're pending because of remote_handle
e = q.expect('dbus-signal', signal='MembersChanged',
args=[u'', [], [], [self_handle], [], remote_handle,
cs.GC_REASON_INVITED])
media_chan = make_channel_proxy(conn, e.path, 'Channel.Interface.Group')
signalling_iface = make_channel_proxy(conn, e.path, 'Channel.Interface.MediaSignalling')
media_iface = make_channel_proxy(conn, e.path, 'Channel.Type.StreamedMedia')
# S-E gets notified about new session handler, and calls Ready on it
e = q.expect('dbus-signal', signal='NewSessionHandler')
assert e.args[1] == 'rtp'
session_handler = make_channel_proxy(conn, e.args[0], 'Media.SessionHandler')
session_handler.Ready()
media_chan.AddMembers([self_handle], 'accepted')
# S-E gets notified about a newly-created stream
e = q.expect('dbus-signal', signal='NewStreamHandler')
id1 = e.args[1]
stream_handler = make_channel_proxy(conn, e.args[0], 'Media.StreamHandler')
# We are now in members too
e = q.expect('dbus-signal', signal='MembersChanged',
args=[u'', [self_handle], [], [], [], self_handle,
cs.GC_REASON_NONE])
# we are now both in members
members = media_chan.GetMembers()
assert set(members) == set([self_handle, remote_handle]), members
stream_handler.NewNativeCandidate("fake", jt2.get_remote_transports_dbus())
stream_handler.Ready(jt2.get_audio_codecs_dbus())
stream_handler.StreamState(cs.MEDIA_STREAM_STATE_CONNECTED)
# First one is transport-info
e = q.expect('stream-iq', predicate=jp.action_predicate('transport-info'))
assertEquals('foo@bar.com/Foo', e.query['initiator'])
# stream.send(gabbletest.make_result_iq(stream, e.stanza))
stream.send(jp.xml(jp.ResultIq('test@localhost', e.stanza, [])))
# S-E reports codec intersection, after which gabble can send acceptance
stream_handler.SupportedCodecs(jt2.get_audio_codecs_dbus())
# Second one is session-accept
e = q.expect('stream-iq', predicate=jp.action_predicate('session-accept'))
# stream.send(gabbletest.make_result_iq(stream, e.stanza))
stream.send(jp.xml(jp.ResultIq('test@localhost', e.stanza, [])))
# Here starts the interesting part of this test
# Remote end tries to create a content we can't handle
node = jp.SetIq(jt2.peer, jt2.jid, [
jp.Jingle(jt2.sid, jt2.peer, 'content-add', [
jp.Content('bogus', 'initiator', 'both', [
jp.Description('hologram', [
jp.PayloadType(name, str(rate), str(id)) for
(name, id, rate) in jt2.audio_codecs ]),
jp.TransportGoogleP2P() ]) ]) ])
stream.send(jp.xml(node))
# In older Jingle, this is a separate namespace, which isn't
# recognized, but it's a valid request, so it gets acked and rejected
if jp.dialect == 'jingle-v0.15':
# Gabble should acknowledge content-add
q.expect('stream-iq', iq_type='result')
# .. and then send content-reject for the bogus content
e = q.expect('stream-iq', iq_type='set', predicate=lambda x:
xpath.queryForNodes("/iq/jingle[@action='content-reject']/content[@name='bogus']",
x.stanza))
# In new Jingle, this is a bogus subtype of recognized namespace,
# so Gabble returns a bad request error
else:
q.expect('stream-iq', iq_type='error')
# Remote end then tries to create a content with a name it's already used
node = jp.SetIq(jt2.peer, jt2.jid, [
jp.Jingle(jt2.sid, jt2.peer, 'content-add', [
jp.Content(jt2.audio_names[0], 'initiator', 'both', [
jp.Description('audio', [
jp.PayloadType(name, str(rate), str(id)) for
(name, id, rate) in jt2.audio_codecs ]),
jp.TransportGoogleP2P() ]) ]) ])
stream.send(jp.xml(node))
# Gabble should return error (content already exists)
q.expect('stream-iq', iq_type='error')
# We try to add a stream
(stream_handler2, id2) = make_stream_request(cs.MEDIA_STREAM_TYPE_VIDEO)
# Gabble should now send content-add
e = q.expect('stream-iq', iq_type='set', predicate=lambda x:
xpath.queryForNodes("/iq/jingle[@action='content-add']",
x.stanza))
c = e.query.firstChildElement()
assert c['creator'] == 'responder', c['creator']
stream.send(jp.xml(jp.ResultIq('test@localhost', e.stanza, [])))
# We try to add yet another stream
(stream_handler3, id3) = make_stream_request(cs.MEDIA_STREAM_TYPE_VIDEO)
# Gabble should send another content-add
e = q.expect('stream-iq', iq_type='set', predicate=lambda x:
xpath.queryForNodes("/iq/jingle[@action='content-add']",
x.stanza))
d = e.query.firstChildElement()
assertEquals('responder', d['creator'])
stream.send(jp.xml(jp.ResultIq('test@localhost', e.stanza, [])))
# Remote end rejects the first stream we tried to add.
node = jp.SetIq(jt2.peer, jt2.jid, [
jp.Jingle(jt2.sid, jt2.peer, 'content-reject', [
jp.Content(c['name'], c['creator'], c['senders'], []) ]) ])
stream.send(jp.xml(node))
# Gabble removes the stream
q.expect('dbus-signal', signal='StreamRemoved',
interface=cs.CHANNEL_TYPE_STREAMED_MEDIA)
# Remote end tries to add a content with the same name as the second one we
# just added
node = jp.SetIq(jt2.peer, jt2.jid, [
jp.Jingle(jt2.sid, jt2.peer, 'content-add', [
jp.Content(d['name'], 'initiator', 'both', [
jp.Description('audio', [
jp.PayloadType(name, str(rate), str(id)) for
(name, id, rate) in jt2.audio_codecs ]),
jp.TransportGoogleP2P() ]) ]) ])
stream.send(jp.xml(node))
# Because stream names are namespaced by creator, Gabble should be okay
# with that.
q.expect_many(
EventPattern('stream-iq', iq_type='result', iq_id=node[2]['id']),
EventPattern('dbus-signal', signal='StreamAdded'),
)
# Remote end thinks better of that, and removes the similarly-named stream
# it tried to add.
node = jp.SetIq(jt2.peer, jt2.jid, [
jp.Jingle(jt2.sid, jt2.peer, 'content-remove', [
jp.Content(d['name'], 'initiator', d['senders'], []) ]) ])
stream.send(jp.xml(node))
q.expect_many(
EventPattern('stream-iq', iq_type='result', iq_id=node[2]['id']),
EventPattern('dbus-signal', signal='StreamRemoved'),
)
# Remote end finally accepts. When Gabble did not namespace contents by
# their creator, it would NAK this IQ:
# - Gabble (responder) created a stream called 'foo';
# - test suite (initiator) created a stream called 'foo', which Gabble
# decided would replace its own stream called 'foo';
# - test suite removed its 'foo';
# - test suite accepted Gabble's 'foo', but Gabble didn't believe a stream
# called 'foo' existed any more.
node = jp.SetIq(jt2.peer, jt2.jid, [
jp.Jingle(jt2.sid, jt2.peer, 'content-accept', [
jp.Content(d['name'], d['creator'], d['senders'], [
jp.Description('video', [
jp.PayloadType(name, str(rate), str(id)) for
(name, id, rate) in jt2.audio_codecs ]),
jp.TransportGoogleP2P() ]) ]) ])
stream.send(jp.xml(node))
# We get remote codecs
e = q.expect('dbus-signal', signal='SetRemoteCodecs')
# Now, both we and remote peer try to remove the content simultaneously:
# Telepathy client calls RemoveStreams...
media_iface.RemoveStreams([id3])
# ...so Gabble sends a content-remove...
e = q.expect('stream-iq', iq_type='set', predicate=lambda x:
xpath.queryForNodes("/iq/jingle[@action='content-remove']",
x.stanza))
# ...but before it's acked the peer sends its own content-remove...
node = jp.SetIq(jt2.peer, jt2.jid, [
jp.Jingle(jt2.sid, jt2.peer, 'content-remove', [
jp.Content(c['name'], c['creator'], c['senders'], []) ]) ])
stream.send(jp.xml(node))
# ...and we don't want Gabble to break when that happens.
sync_stream(q, stream)
# Now we want to remove the first stream
media_iface.RemoveStreams([id1])
# Since this is the last stream, Gabble will just terminate the session.
e = q.expect('stream-iq', iq_type='set', predicate=lambda x:
xpath.queryForNodes("/iq/jingle[@action='session-terminate']",
x.stanza))
if __name__ == '__main__':
test_dialects(worker, [JingleProtocol015, JingleProtocol031])
|
community-ssu/telepathy-gabble
|
tests/twisted/jingle/test-content-complex.py
|
Python
|
lgpl-2.1
| 10,117
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create a scene with one of each cell type
# Voxel
voxelPoints = vtk.vtkPoints()
voxelPoints.SetNumberOfPoints(8)
voxelPoints.InsertPoint(0,0,0,0)
voxelPoints.InsertPoint(1,1,0,0)
voxelPoints.InsertPoint(2,0,1,0)
voxelPoints.InsertPoint(3,1,1,0)
voxelPoints.InsertPoint(4,0,0,1)
voxelPoints.InsertPoint(5,1,0,1)
voxelPoints.InsertPoint(6,0,1,1)
voxelPoints.InsertPoint(7,1,1,1)
aVoxel = vtk.vtkVoxel()
aVoxel.GetPointIds().SetId(0,0)
aVoxel.GetPointIds().SetId(1,1)
aVoxel.GetPointIds().SetId(2,2)
aVoxel.GetPointIds().SetId(3,3)
aVoxel.GetPointIds().SetId(4,4)
aVoxel.GetPointIds().SetId(5,5)
aVoxel.GetPointIds().SetId(6,6)
aVoxel.GetPointIds().SetId(7,7)
aVoxelGrid = vtk.vtkUnstructuredGrid()
aVoxelGrid.Allocate(1,1)
aVoxelGrid.InsertNextCell(aVoxel.GetCellType(),aVoxel.GetPointIds())
aVoxelGrid.SetPoints(voxelPoints)
aVoxelMapper = vtk.vtkDataSetMapper()
aVoxelMapper.SetInputData(aVoxelGrid)
aVoxelActor = vtk.vtkActor()
aVoxelActor.SetMapper(aVoxelMapper)
aVoxelActor.GetProperty().BackfaceCullingOn()
# Hexahedron
hexahedronPoints = vtk.vtkPoints()
hexahedronPoints.SetNumberOfPoints(8)
hexahedronPoints.InsertPoint(0,0,0,0)
hexahedronPoints.InsertPoint(1,1,0,0)
hexahedronPoints.InsertPoint(2,1,1,0)
hexahedronPoints.InsertPoint(3,0,1,0)
hexahedronPoints.InsertPoint(4,0,0,1)
hexahedronPoints.InsertPoint(5,1,0,1)
hexahedronPoints.InsertPoint(6,1,1,1)
hexahedronPoints.InsertPoint(7,0,1,1)
aHexahedron = vtk.vtkHexahedron()
aHexahedron.GetPointIds().SetId(0,0)
aHexahedron.GetPointIds().SetId(1,1)
aHexahedron.GetPointIds().SetId(2,2)
aHexahedron.GetPointIds().SetId(3,3)
aHexahedron.GetPointIds().SetId(4,4)
aHexahedron.GetPointIds().SetId(5,5)
aHexahedron.GetPointIds().SetId(6,6)
aHexahedron.GetPointIds().SetId(7,7)
aHexahedronGrid = vtk.vtkUnstructuredGrid()
aHexahedronGrid.Allocate(1,1)
aHexahedronGrid.InsertNextCell(aHexahedron.GetCellType(),aHexahedron.GetPointIds())
aHexahedronGrid.SetPoints(hexahedronPoints)
aHexahedronMapper = vtk.vtkDataSetMapper()
aHexahedronMapper.SetInputData(aHexahedronGrid)
aHexahedronActor = vtk.vtkActor()
aHexahedronActor.SetMapper(aHexahedronMapper)
aHexahedronActor.AddPosition(2,0,0)
aHexahedronActor.GetProperty().BackfaceCullingOn()
# Tetra
tetraPoints = vtk.vtkPoints()
tetraPoints.SetNumberOfPoints(4)
tetraPoints.InsertPoint(0,0,0,0)
tetraPoints.InsertPoint(1,1,0,0)
tetraPoints.InsertPoint(2,.5,1,0)
tetraPoints.InsertPoint(3,.5,.5,1)
aTetra = vtk.vtkTetra()
aTetra.GetPointIds().SetId(0,0)
aTetra.GetPointIds().SetId(1,1)
aTetra.GetPointIds().SetId(2,2)
aTetra.GetPointIds().SetId(3,3)
aTetraGrid = vtk.vtkUnstructuredGrid()
aTetraGrid.Allocate(1,1)
aTetraGrid.InsertNextCell(aTetra.GetCellType(),aTetra.GetPointIds())
aTetraGrid.SetPoints(tetraPoints)
aTetraMapper = vtk.vtkDataSetMapper()
aTetraMapper.SetInputData(aTetraGrid)
aTetraActor = vtk.vtkActor()
aTetraActor.SetMapper(aTetraMapper)
aTetraActor.AddPosition(4,0,0)
aTetraActor.GetProperty().BackfaceCullingOn()
# Wedge
wedgePoints = vtk.vtkPoints()
wedgePoints.SetNumberOfPoints(6)
wedgePoints.InsertPoint(0,0,1,0)
wedgePoints.InsertPoint(1,0,0,0)
wedgePoints.InsertPoint(2,0,.5,.5)
wedgePoints.InsertPoint(3,1,1,0)
wedgePoints.InsertPoint(4,1,0,0)
wedgePoints.InsertPoint(5,1,.5,.5)
aWedge = vtk.vtkWedge()
aWedge.GetPointIds().SetId(0,0)
aWedge.GetPointIds().SetId(1,1)
aWedge.GetPointIds().SetId(2,2)
aWedge.GetPointIds().SetId(3,3)
aWedge.GetPointIds().SetId(4,4)
aWedge.GetPointIds().SetId(5,5)
aWedgeGrid = vtk.vtkUnstructuredGrid()
aWedgeGrid.Allocate(1,1)
aWedgeGrid.InsertNextCell(aWedge.GetCellType(),aWedge.GetPointIds())
aWedgeGrid.SetPoints(wedgePoints)
aWedgeMapper = vtk.vtkDataSetMapper()
aWedgeMapper.SetInputData(aWedgeGrid)
aWedgeActor = vtk.vtkActor()
aWedgeActor.SetMapper(aWedgeMapper)
aWedgeActor.AddPosition(6,0,0)
aWedgeActor.GetProperty().BackfaceCullingOn()
# Pyramid
pyramidPoints = vtk.vtkPoints()
pyramidPoints.SetNumberOfPoints(5)
pyramidPoints.InsertPoint(0,0,0,0)
pyramidPoints.InsertPoint(1,1,0,0)
pyramidPoints.InsertPoint(2,1,1,0)
pyramidPoints.InsertPoint(3,0,1,0)
pyramidPoints.InsertPoint(4,.5,.5,1)
aPyramid = vtk.vtkPyramid()
aPyramid.GetPointIds().SetId(0,0)
aPyramid.GetPointIds().SetId(1,1)
aPyramid.GetPointIds().SetId(2,2)
aPyramid.GetPointIds().SetId(3,3)
aPyramid.GetPointIds().SetId(4,4)
aPyramidGrid = vtk.vtkUnstructuredGrid()
aPyramidGrid.Allocate(1,1)
aPyramidGrid.InsertNextCell(aPyramid.GetCellType(),aPyramid.GetPointIds())
aPyramidGrid.SetPoints(pyramidPoints)
aPyramidMapper = vtk.vtkDataSetMapper()
aPyramidMapper.SetInputData(aPyramidGrid)
aPyramidActor = vtk.vtkActor()
aPyramidActor.SetMapper(aPyramidMapper)
aPyramidActor.AddPosition(8,0,0)
aPyramidActor.GetProperty().BackfaceCullingOn()
# Pixel
pixelPoints = vtk.vtkPoints()
pixelPoints.SetNumberOfPoints(4)
pixelPoints.InsertPoint(0,0,0,0)
pixelPoints.InsertPoint(1,1,0,0)
pixelPoints.InsertPoint(2,0,1,0)
pixelPoints.InsertPoint(3,1,1,0)
aPixel = vtk.vtkPixel()
aPixel.GetPointIds().SetId(0,0)
aPixel.GetPointIds().SetId(1,1)
aPixel.GetPointIds().SetId(2,2)
aPixel.GetPointIds().SetId(3,3)
aPixelGrid = vtk.vtkUnstructuredGrid()
aPixelGrid.Allocate(1,1)
aPixelGrid.InsertNextCell(aPixel.GetCellType(),aPixel.GetPointIds())
aPixelGrid.SetPoints(pixelPoints)
aPixelMapper = vtk.vtkDataSetMapper()
aPixelMapper.SetInputData(aPixelGrid)
aPixelActor = vtk.vtkActor()
aPixelActor.SetMapper(aPixelMapper)
aPixelActor.AddPosition(0,0,2)
aPixelActor.GetProperty().BackfaceCullingOn()
# Quad
quadPoints = vtk.vtkPoints()
quadPoints.SetNumberOfPoints(4)
quadPoints.InsertPoint(0,0,0,0)
quadPoints.InsertPoint(1,1,0,0)
quadPoints.InsertPoint(2,1,1,0)
quadPoints.InsertPoint(3,0,1,0)
aQuad = vtk.vtkQuad()
aQuad.GetPointIds().SetId(0,0)
aQuad.GetPointIds().SetId(1,1)
aQuad.GetPointIds().SetId(2,2)
aQuad.GetPointIds().SetId(3,3)
aQuadGrid = vtk.vtkUnstructuredGrid()
aQuadGrid.Allocate(1,1)
aQuadGrid.InsertNextCell(aQuad.GetCellType(),aQuad.GetPointIds())
aQuadGrid.SetPoints(quadPoints)
aQuadMapper = vtk.vtkDataSetMapper()
aQuadMapper.SetInputData(aQuadGrid)
aQuadActor = vtk.vtkActor()
aQuadActor.SetMapper(aQuadMapper)
aQuadActor.AddPosition(2,0,2)
aQuadActor.GetProperty().BackfaceCullingOn()
# Triangle
trianglePoints = vtk.vtkPoints()
trianglePoints.SetNumberOfPoints(3)
trianglePoints.InsertPoint(0,0,0,0)
trianglePoints.InsertPoint(1,1,0,0)
trianglePoints.InsertPoint(2,.5,.5,0)
aTriangle = vtk.vtkTriangle()
aTriangle.GetPointIds().SetId(0,0)
aTriangle.GetPointIds().SetId(1,1)
aTriangle.GetPointIds().SetId(2,2)
aTriangleGrid = vtk.vtkUnstructuredGrid()
aTriangleGrid.Allocate(1,1)
aTriangleGrid.InsertNextCell(aTriangle.GetCellType(),aTriangle.GetPointIds())
aTriangleGrid.SetPoints(trianglePoints)
aTriangleMapper = vtk.vtkDataSetMapper()
aTriangleMapper.SetInputData(aTriangleGrid)
aTriangleActor = vtk.vtkActor()
aTriangleActor.SetMapper(aTriangleMapper)
aTriangleActor.AddPosition(4,0,2)
aTriangleActor.GetProperty().BackfaceCullingOn()
# Polygon
polygonPoints = vtk.vtkPoints()
polygonPoints.SetNumberOfPoints(4)
polygonPoints.InsertPoint(0,0,0,0)
polygonPoints.InsertPoint(1,1,0,0)
polygonPoints.InsertPoint(2,1,1,0)
polygonPoints.InsertPoint(3,0,1,0)
aPolygon = vtk.vtkPolygon()
aPolygon.GetPointIds().SetNumberOfIds(4)
aPolygon.GetPointIds().SetId(0,0)
aPolygon.GetPointIds().SetId(1,1)
aPolygon.GetPointIds().SetId(2,2)
aPolygon.GetPointIds().SetId(3,3)
aPolygonGrid = vtk.vtkUnstructuredGrid()
aPolygonGrid.Allocate(1,1)
aPolygonGrid.InsertNextCell(aPolygon.GetCellType(),aPolygon.GetPointIds())
aPolygonGrid.SetPoints(polygonPoints)
aPolygonMapper = vtk.vtkDataSetMapper()
aPolygonMapper.SetInputData(aPolygonGrid)
aPolygonActor = vtk.vtkActor()
aPolygonActor.SetMapper(aPolygonMapper)
aPolygonActor.AddPosition(6,0,2)
aPolygonActor.GetProperty().BackfaceCullingOn()
# Triangle Strip
triangleStripPoints = vtk.vtkPoints()
triangleStripPoints.SetNumberOfPoints(5)
triangleStripPoints.InsertPoint(0,0,1,0)
triangleStripPoints.InsertPoint(1,0,0,0)
triangleStripPoints.InsertPoint(2,1,1,0)
triangleStripPoints.InsertPoint(3,1,0,0)
triangleStripPoints.InsertPoint(4,2,1,0)
aTriangleStrip = vtk.vtkTriangleStrip()
aTriangleStrip.GetPointIds().SetNumberOfIds(5)
aTriangleStrip.GetPointIds().SetId(0,0)
aTriangleStrip.GetPointIds().SetId(1,1)
aTriangleStrip.GetPointIds().SetId(2,2)
aTriangleStrip.GetPointIds().SetId(3,3)
aTriangleStrip.GetPointIds().SetId(4,4)
aTriangleStripGrid = vtk.vtkUnstructuredGrid()
aTriangleStripGrid.Allocate(1,1)
aTriangleStripGrid.InsertNextCell(aTriangleStrip.GetCellType(),aTriangleStrip.GetPointIds())
aTriangleStripGrid.SetPoints(triangleStripPoints)
aTriangleStripMapper = vtk.vtkDataSetMapper()
aTriangleStripMapper.SetInputData(aTriangleStripGrid)
aTriangleStripActor = vtk.vtkActor()
aTriangleStripActor.SetMapper(aTriangleStripMapper)
aTriangleStripActor.AddPosition(8,0,2)
aTriangleStripActor.GetProperty().BackfaceCullingOn()
# Line
linePoints = vtk.vtkPoints()
linePoints.SetNumberOfPoints(2)
linePoints.InsertPoint(0,0,0,0)
linePoints.InsertPoint(1,1,1,0)
aLine = vtk.vtkLine()
aLine.GetPointIds().SetId(0,0)
aLine.GetPointIds().SetId(1,1)
aLineGrid = vtk.vtkUnstructuredGrid()
aLineGrid.Allocate(1,1)
aLineGrid.InsertNextCell(aLine.GetCellType(),aLine.GetPointIds())
aLineGrid.SetPoints(linePoints)
aLineMapper = vtk.vtkDataSetMapper()
aLineMapper.SetInputData(aLineGrid)
aLineActor = vtk.vtkActor()
aLineActor.SetMapper(aLineMapper)
aLineActor.AddPosition(0,0,4)
aLineActor.GetProperty().BackfaceCullingOn()
# Poly line
polyLinePoints = vtk.vtkPoints()
polyLinePoints.SetNumberOfPoints(3)
polyLinePoints.InsertPoint(0,0,0,0)
polyLinePoints.InsertPoint(1,1,1,0)
polyLinePoints.InsertPoint(2,1,0,0)
aPolyLine = vtk.vtkPolyLine()
aPolyLine.GetPointIds().SetNumberOfIds(3)
aPolyLine.GetPointIds().SetId(0,0)
aPolyLine.GetPointIds().SetId(1,1)
aPolyLine.GetPointIds().SetId(2,2)
aPolyLineGrid = vtk.vtkUnstructuredGrid()
aPolyLineGrid.Allocate(1,1)
aPolyLineGrid.InsertNextCell(aPolyLine.GetCellType(),aPolyLine.GetPointIds())
aPolyLineGrid.SetPoints(polyLinePoints)
aPolyLineMapper = vtk.vtkDataSetMapper()
aPolyLineMapper.SetInputData(aPolyLineGrid)
aPolyLineActor = vtk.vtkActor()
aPolyLineActor.SetMapper(aPolyLineMapper)
aPolyLineActor.AddPosition(2,0,4)
aPolyLineActor.GetProperty().BackfaceCullingOn()
# Vertex
vertexPoints = vtk.vtkPoints()
vertexPoints.SetNumberOfPoints(1)
vertexPoints.InsertPoint(0,0,0,0)
aVertex = vtk.vtkVertex()
aVertex.GetPointIds().SetId(0,0)
aVertexGrid = vtk.vtkUnstructuredGrid()
aVertexGrid.Allocate(1,1)
aVertexGrid.InsertNextCell(aVertex.GetCellType(),aVertex.GetPointIds())
aVertexGrid.SetPoints(vertexPoints)
aVertexMapper = vtk.vtkDataSetMapper()
aVertexMapper.SetInputData(aVertexGrid)
aVertexActor = vtk.vtkActor()
aVertexActor.SetMapper(aVertexMapper)
aVertexActor.AddPosition(0,0,6)
aVertexActor.GetProperty().BackfaceCullingOn()
# Poly Vertex
polyVertexPoints = vtk.vtkPoints()
polyVertexPoints.SetNumberOfPoints(3)
polyVertexPoints.InsertPoint(0,0,0,0)
polyVertexPoints.InsertPoint(1,1,0,0)
polyVertexPoints.InsertPoint(2,1,1,0)
aPolyVertex = vtk.vtkPolyVertex()
aPolyVertex.GetPointIds().SetNumberOfIds(3)
aPolyVertex.GetPointIds().SetId(0,0)
aPolyVertex.GetPointIds().SetId(1,1)
aPolyVertex.GetPointIds().SetId(2,2)
aPolyVertexGrid = vtk.vtkUnstructuredGrid()
aPolyVertexGrid.Allocate(1,1)
aPolyVertexGrid.InsertNextCell(aPolyVertex.GetCellType(),aPolyVertex.GetPointIds())
aPolyVertexGrid.SetPoints(polyVertexPoints)
aPolyVertexMapper = vtk.vtkDataSetMapper()
aPolyVertexMapper.SetInputData(aPolyVertexGrid)
aPolyVertexActor = vtk.vtkActor()
aPolyVertexActor.SetMapper(aPolyVertexMapper)
aPolyVertexActor.AddPosition(2,0,6)
aPolyVertexActor.GetProperty().BackfaceCullingOn()
# Pentagonal prism
pentaPoints = vtk.vtkPoints()
pentaPoints.SetNumberOfPoints(10)
pentaPoints.InsertPoint(0,0.25,0.0,0.0)
pentaPoints.InsertPoint(1,0.75,0.0,0.0)
pentaPoints.InsertPoint(2,1.0,0.5,0.0)
pentaPoints.InsertPoint(3,0.5,1.0,0.0)
pentaPoints.InsertPoint(4,0.0,0.5,0.0)
pentaPoints.InsertPoint(5,0.25,0.0,1.0)
pentaPoints.InsertPoint(6,0.75,0.0,1.0)
pentaPoints.InsertPoint(7,1.0,0.5,1.0)
pentaPoints.InsertPoint(8,0.5,1.0,1.0)
pentaPoints.InsertPoint(9,0.0,0.5,1.0)
aPenta = vtk.vtkPentagonalPrism()
aPenta.GetPointIds().SetId(0,0)
aPenta.GetPointIds().SetId(1,1)
aPenta.GetPointIds().SetId(2,2)
aPenta.GetPointIds().SetId(3,3)
aPenta.GetPointIds().SetId(4,4)
aPenta.GetPointIds().SetId(5,5)
aPenta.GetPointIds().SetId(6,6)
aPenta.GetPointIds().SetId(7,7)
aPenta.GetPointIds().SetId(8,8)
aPenta.GetPointIds().SetId(9,9)
aPentaGrid = vtk.vtkUnstructuredGrid()
aPentaGrid.Allocate(1,1)
aPentaGrid.InsertNextCell(aPenta.GetCellType(),aPenta.GetPointIds())
aPentaGrid.SetPoints(pentaPoints)
aPentaMapper = vtk.vtkDataSetMapper()
aPentaMapper.SetInputData(aPentaGrid)
aPentaActor = vtk.vtkActor()
aPentaActor.SetMapper(aPentaMapper)
aPentaActor.AddPosition(10,0,0)
aPentaActor.GetProperty().BackfaceCullingOn()
# Hexagonal prism
hexaPoints = vtk.vtkPoints()
hexaPoints.SetNumberOfPoints(12)
hexaPoints.InsertPoint(0,0.0,0.0,0.0)
hexaPoints.InsertPoint(1,0.5,0.0,0.0)
hexaPoints.InsertPoint(2,1.0,0.5,0.0)
hexaPoints.InsertPoint(3,1.0,1.0,0.0)
hexaPoints.InsertPoint(4,0.5,1.0,0.0)
hexaPoints.InsertPoint(5,0.0,0.5,0.0)
hexaPoints.InsertPoint(6,0.0,0.0,1.0)
hexaPoints.InsertPoint(7,0.5,0.0,1.0)
hexaPoints.InsertPoint(8,1.0,0.5,1.0)
hexaPoints.InsertPoint(9,1.0,1.0,1.0)
hexaPoints.InsertPoint(10,0.5,1.0,1.0)
hexaPoints.InsertPoint(11,0.0,0.5,1.0)
aHexa = vtk.vtkHexagonalPrism()
aHexa.GetPointIds().SetId(0,0)
aHexa.GetPointIds().SetId(1,1)
aHexa.GetPointIds().SetId(2,2)
aHexa.GetPointIds().SetId(3,3)
aHexa.GetPointIds().SetId(4,4)
aHexa.GetPointIds().SetId(5,5)
aHexa.GetPointIds().SetId(6,6)
aHexa.GetPointIds().SetId(7,7)
aHexa.GetPointIds().SetId(8,8)
aHexa.GetPointIds().SetId(9,9)
aHexa.GetPointIds().SetId(10,10)
aHexa.GetPointIds().SetId(11,11)
aHexaGrid = vtk.vtkUnstructuredGrid()
aHexaGrid.Allocate(1,1)
aHexaGrid.InsertNextCell(aHexa.GetCellType(),aHexa.GetPointIds())
aHexaGrid.SetPoints(hexaPoints)
aHexaMapper = vtk.vtkDataSetMapper()
aHexaMapper.SetInputData(aHexaGrid)
aHexaActor = vtk.vtkActor()
aHexaActor.SetMapper(aHexaMapper)
aHexaActor.AddPosition(12,0,0)
aHexaActor.GetProperty().BackfaceCullingOn()
ren1.SetBackground(.1,.2,.4)
ren1.AddActor(aVoxelActor)
aVoxelActor.GetProperty().SetDiffuseColor(1,0,0)
ren1.AddActor(aHexahedronActor)
aHexahedronActor.GetProperty().SetDiffuseColor(1,1,0)
ren1.AddActor(aTetraActor)
aTetraActor.GetProperty().SetDiffuseColor(0,1,0)
ren1.AddActor(aWedgeActor)
aWedgeActor.GetProperty().SetDiffuseColor(0,1,1)
ren1.AddActor(aPyramidActor)
aPyramidActor.GetProperty().SetDiffuseColor(1,0,1)
ren1.AddActor(aPixelActor)
aPixelActor.GetProperty().SetDiffuseColor(0,1,1)
ren1.AddActor(aQuadActor)
aQuadActor.GetProperty().SetDiffuseColor(1,0,1)
ren1.AddActor(aTriangleActor)
aTriangleActor.GetProperty().SetDiffuseColor(.3,1,.5)
ren1.AddActor(aPolygonActor)
aPolygonActor.GetProperty().SetDiffuseColor(1,.4,.5)
ren1.AddActor(aTriangleStripActor)
aTriangleStripActor.GetProperty().SetDiffuseColor(.3,.7,1)
ren1.AddActor(aLineActor)
aLineActor.GetProperty().SetDiffuseColor(.2,1,1)
ren1.AddActor(aPolyLineActor)
aPolyLineActor.GetProperty().SetDiffuseColor(1,1,1)
ren1.AddActor(aVertexActor)
aVertexActor.GetProperty().SetDiffuseColor(1,1,1)
ren1.AddActor(aPolyVertexActor)
aPolyVertexActor.GetProperty().SetDiffuseColor(1,1,1)
ren1.AddActor(aPentaActor)
aPentaActor.GetProperty().SetDiffuseColor(.2,.4,.7)
ren1.AddActor(aHexaActor)
aHexaActor.GetProperty().SetDiffuseColor(.7,.5,1)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(30)
ren1.GetActiveCamera().Elevation(20)
ren1.GetActiveCamera().Dolly(1.25)
ren1.ResetCameraClippingRange()
renWin.Render()
cellPicker = vtk.vtkCellPicker()
pointPicker = vtk.vtkPointPicker()
worldPicker = vtk.vtkWorldPointPicker()
cellCount = 0
pointCount = 0
ren1.IsInViewport(0,0)
x = 0
while x <= 265:
y = 100
while y <= 200:
cellPicker.Pick(x,y,0,ren1)
pointPicker.Pick(x,y,0,ren1)
worldPicker.Pick(x,y,0,ren1)
if cellPicker.GetCellId() != -1:
cellCount = cellCount + 1
pass
if pointPicker.GetPointId() != -1:
pointCount = pointCount + 1
pass
y = y + 6
x = x + 6
# render the image
#
iren.Initialize()
# --- end of script --
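# Hedged single-pick illustration: a hit is signalled by a non-negative cell
# id (GetCellId() returns -1 when nothing was picked), which is the integer
# comparison used in the counting loop above.
#
# picker = vtk.vtkCellPicker()
# picker.Pick(150, 150, 0, ren1)
# if picker.GetCellId() != -1:
#     print("picked cell id:", picker.GetCellId())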
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/pickCells.py
|
Python
|
gpl-3.0
| 16,540
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from collections import defaultdict
from datetime import datetime, time
from dateutil import relativedelta
from itertools import groupby
from json import dumps
from psycopg2 import OperationalError
from odoo import SUPERUSER_ID, _, api, fields, models, registry
from odoo.addons.stock.models.stock_rule import ProcurementException
from odoo.exceptions import UserError, ValidationError
from odoo.osv import expression
from odoo.tools import float_compare, frozendict, split_every
_logger = logging.getLogger(__name__)
class StockWarehouseOrderpoint(models.Model):
""" Defines Minimum stock rules. """
_name = "stock.warehouse.orderpoint"
_description = "Minimum Inventory Rule"
_check_company_auto = True
_order = "location_id,company_id,id"
@api.model
def default_get(self, fields):
res = super().default_get(fields)
warehouse = None
if 'warehouse_id' not in res and res.get('company_id'):
warehouse = self.env['stock.warehouse'].search([('company_id', '=', res['company_id'])], limit=1)
if warehouse:
res['warehouse_id'] = warehouse.id
res['location_id'] = warehouse.lot_stock_id.id
return res
@api.model
def _domain_product_id(self):
domain = "('type', '=', 'product')"
if self.env.context.get('active_model') == 'product.template':
product_template_id = self.env.context.get('active_id', False)
domain = f"('product_tmpl_id', '=', {product_template_id})"
elif self.env.context.get('default_product_id', False):
product_id = self.env.context.get('default_product_id', False)
domain = f"('id', '=', {product_id})"
return f"[{domain}, '|', ('company_id', '=', False), ('company_id', '=', company_id)]"
name = fields.Char(
'Name', copy=False, required=True, readonly=True,
default=lambda self: self.env['ir.sequence'].next_by_code('stock.orderpoint'))
trigger = fields.Selection([
('auto', 'Auto'), ('manual', 'Manual')], string='Trigger', default='auto', required=True)
active = fields.Boolean(
'Active', default=True,
help="If the active field is set to False, it will allow you to hide the orderpoint without removing it.")
snoozed_until = fields.Date('Snoozed', help="Hidden until next scheduler.")
warehouse_id = fields.Many2one(
'stock.warehouse', 'Warehouse',
check_company=True, ondelete="cascade", required=True)
location_id = fields.Many2one(
'stock.location', 'Location', index=True,
ondelete="cascade", required=True, check_company=True)
product_tmpl_id = fields.Many2one('product.template', related='product_id.product_tmpl_id')
product_id = fields.Many2one(
'product.product', 'Product', index=True,
domain=lambda self: self._domain_product_id(),
ondelete='cascade', required=True, check_company=True)
product_category_id = fields.Many2one('product.category', string='Product Category', related='product_id.categ_id', store=True)
product_uom = fields.Many2one(
'uom.uom', 'Unit of Measure', related='product_id.uom_id')
product_uom_name = fields.Char(string='Product unit of measure label', related='product_uom.display_name', readonly=True)
product_min_qty = fields.Float(
'Min Quantity', digits='Product Unit of Measure', required=True, default=0.0,
help="When the virtual stock equals to or goes below the Min Quantity specified for this field, Odoo generates "
"a procurement to bring the forecasted quantity to the Max Quantity.")
product_max_qty = fields.Float(
'Max Quantity', digits='Product Unit of Measure', required=True, default=0.0,
help="When the virtual stock goes below the Min Quantity, Odoo generates "
"a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity.")
qty_multiple = fields.Float(
'Multiple Quantity', digits='Product Unit of Measure',
default=1, required=True,
help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used.")
group_id = fields.Many2one(
'procurement.group', 'Procurement Group', copy=False,
help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by stock rules will be grouped into one big picking.")
company_id = fields.Many2one(
'res.company', 'Company', required=True, index=True,
default=lambda self: self.env.company)
allowed_location_ids = fields.One2many(comodel_name='stock.location', compute='_compute_allowed_location_ids')
rule_ids = fields.Many2many('stock.rule', string='Rules used', compute='_compute_rules')
json_lead_days_popover = fields.Char(compute='_compute_json_popover')
lead_days_date = fields.Date(compute='_compute_lead_days')
allowed_route_ids = fields.Many2many('stock.location.route', compute='_compute_allowed_route_ids')
route_id = fields.Many2one(
'stock.location.route', string='Preferred Route', domain="[('id', 'in', allowed_route_ids)]")
qty_on_hand = fields.Float('On Hand', readonly=True, compute='_compute_qty')
qty_forecast = fields.Float('Forecast', readonly=True, compute='_compute_qty')
qty_to_order = fields.Float('To Order', compute='_compute_qty_to_order', store=True, readonly=False)
_sql_constraints = [
('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
]
@api.depends('warehouse_id')
def _compute_allowed_location_ids(self):
loc_domain = [('usage', 'in', ('internal', 'view'))]
# We want to keep only the locations
# - strictly belonging to our warehouse
# - not belonging to any warehouses
for orderpoint in self:
other_warehouses = self.env['stock.warehouse'].search([('id', '!=', orderpoint.warehouse_id.id)])
for view_location_id in other_warehouses.mapped('view_location_id'):
loc_domain = expression.AND([loc_domain, ['!', ('id', 'child_of', view_location_id.id)]])
loc_domain = expression.AND([loc_domain, ['|', ('company_id', '=', False), ('company_id', '=', orderpoint.company_id.id)]])
orderpoint.allowed_location_ids = self.env['stock.location'].search(loc_domain)
@api.depends('warehouse_id', 'location_id')
def _compute_allowed_route_ids(self):
route_by_product = self.env['stock.location.route'].search([
('product_selectable', '=', True),
])
self.allowed_route_ids = route_by_product.ids
@api.depends('rule_ids', 'product_id.seller_ids', 'product_id.seller_ids.delay')
def _compute_json_popover(self):
for orderpoint in self:
if not orderpoint.product_id or not orderpoint.location_id:
orderpoint.json_lead_days_popover = False
continue
dummy, lead_days_description = orderpoint.rule_ids._get_lead_days(orderpoint.product_id)
orderpoint.json_lead_days_popover = dumps({
'title': _('Replenishment'),
'icon': 'fa-area-chart',
'popoverTemplate': 'stock.leadDaysPopOver',
'lead_days_date': fields.Date.to_string(orderpoint.lead_days_date),
'lead_days_description': lead_days_description,
'today': fields.Date.to_string(fields.Date.today()),
'trigger': orderpoint.trigger,
'qty_forecast': orderpoint.qty_forecast,
'qty_to_order': orderpoint.qty_to_order,
'product_min_qty': orderpoint.product_min_qty,
'product_max_qty': orderpoint.product_max_qty,
'product_uom_name': orderpoint.product_uom_name,
'virtual': orderpoint.trigger == 'manual' and orderpoint.create_uid.id == SUPERUSER_ID,
})
@api.depends('rule_ids', 'product_id.seller_ids', 'product_id.seller_ids.delay')
def _compute_lead_days(self):
for orderpoint in self:
if not orderpoint.product_id or not orderpoint.location_id:
orderpoint.lead_days_date = False
continue
lead_days, dummy = orderpoint.rule_ids._get_lead_days(orderpoint.product_id)
lead_days_date = fields.Date.today() + relativedelta.relativedelta(days=lead_days)
orderpoint.lead_days_date = lead_days_date
@api.depends('route_id', 'product_id', 'location_id', 'company_id', 'warehouse_id', 'product_id.route_ids')
def _compute_rules(self):
for orderpoint in self:
if not orderpoint.product_id or not orderpoint.location_id:
orderpoint.rule_ids = False
continue
orderpoint.rule_ids = orderpoint.product_id._get_rules_from_location(orderpoint.location_id, route_ids=orderpoint.route_id)
@api.constrains('product_id')
def _check_product_uom(self):
''' Check if the UoM has the same category as the product standard UoM '''
if any(orderpoint.product_id.uom_id.category_id != orderpoint.product_uom.category_id for orderpoint in self):
raise ValidationError(_('You have to select a product unit of measure that is in the same category as the default unit of measure of the product'))
@api.onchange('location_id')
def _onchange_location_id(self):
warehouse = self.location_id.get_warehouse().id
if warehouse:
self.warehouse_id = warehouse
@api.onchange('warehouse_id')
def _onchange_warehouse_id(self):
""" Finds location id for changed warehouse. """
if self.warehouse_id:
self.location_id = self.warehouse_id.lot_stock_id.id
else:
self.location_id = False
@api.onchange('product_id')
def _onchange_product_id(self):
if self.product_id:
self.product_uom = self.product_id.uom_id.id
@api.onchange('company_id')
def _onchange_company_id(self):
if self.company_id:
self.warehouse_id = self.env['stock.warehouse'].search([
('company_id', '=', self.company_id.id)
], limit=1)
def write(self, vals):
if 'company_id' in vals:
for orderpoint in self:
if orderpoint.company_id.id != vals['company_id']:
raise UserError(_("Changing the company of this record is forbidden at this point, you should rather archive it and create a new one."))
return super().write(vals)
@api.model
def action_open_orderpoints(self):
return self._get_orderpoint_action()
def action_replenish(self):
self._procure_orderpoint_confirm(company_id=self.env.company)
notification = False
if len(self) == 1:
notification = self._get_replenishment_order_notification()
# Force the quantity recomputation because there is no compute dependency to trigger it here.
self._compute_qty()
self.filtered(lambda o: o.create_uid.id == SUPERUSER_ID and o.qty_to_order <= 0.0 and o.trigger == 'manual').unlink()
return notification
def action_replenish_auto(self):
self.trigger = 'auto'
return self.action_replenish()
@api.depends('product_id', 'location_id', 'product_id.stock_move_ids', 'product_id.stock_move_ids.state', 'product_id.stock_move_ids.product_uom_qty')
def _compute_qty(self):
orderpoints_contexts = defaultdict(lambda: self.env['stock.warehouse.orderpoint'])
for orderpoint in self:
if not orderpoint.product_id or not orderpoint.location_id:
orderpoint.qty_on_hand = False
orderpoint.qty_forecast = False
continue
orderpoint_context = orderpoint._get_product_context()
product_context = frozendict({**self.env.context, **orderpoint_context})
orderpoints_contexts[product_context] |= orderpoint
for orderpoint_context, orderpoints_by_context in orderpoints_contexts.items():
products_qty = orderpoints_by_context.product_id.with_context(orderpoint_context)._product_available()
products_qty_in_progress = orderpoints_by_context._quantity_in_progress()
for orderpoint in orderpoints_by_context:
orderpoint.qty_on_hand = products_qty[orderpoint.product_id.id]['qty_available']
orderpoint.qty_forecast = products_qty[orderpoint.product_id.id]['virtual_available'] + products_qty_in_progress[orderpoint.id]
@api.depends('qty_multiple', 'qty_forecast', 'product_min_qty', 'product_max_qty')
def _compute_qty_to_order(self):
for orderpoint in self:
if not orderpoint.product_id or not orderpoint.location_id:
orderpoint.qty_to_order = False
continue
qty_to_order = 0.0
rounding = orderpoint.product_uom.rounding
if float_compare(orderpoint.qty_forecast, orderpoint.product_min_qty, precision_rounding=rounding) < 0:
qty_to_order = max(orderpoint.product_min_qty, orderpoint.product_max_qty) - orderpoint.qty_forecast
remainder = orderpoint.qty_multiple > 0 and qty_to_order % orderpoint.qty_multiple or 0.0
if float_compare(remainder, 0.0, precision_rounding=rounding) > 0:
qty_to_order += orderpoint.qty_multiple - remainder
orderpoint.qty_to_order = qty_to_order
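# Hedged plain-Python sketch of the reordering rule implemented above, ignoring
# UoM rounding and float_compare: when the forecast drops below the minimum,
# order up to the maximum and round the quantity up to the next multiple.
#
# qty_forecast, product_min_qty, product_max_qty, qty_multiple = 2.0, 5.0, 20.0, 5.0
# qty_to_order = 0.0
# if qty_forecast < product_min_qty:
#     qty_to_order = max(product_min_qty, product_max_qty) - qty_forecast  # 18.0
#     remainder = qty_to_order % qty_multiple if qty_multiple > 0 else 0.0
#     if remainder > 0:
#         qty_to_order += qty_multiple - remainder                         # 20.0
# print(qty_to_order)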
def _set_default_route_id(self):
""" Write the `route_id` field on `self`. This method is intendend to be called on the
orderpoints generated when openning the replenish report.
"""
self = self.filtered(lambda o: not o.route_id)
rules_groups = self.env['stock.rule'].read_group([
('route_id.product_selectable', '!=', False),
('location_id', 'in', self.location_id.ids),
('action', 'in', ['pull_push', 'pull'])
], ['location_id', 'route_id'], ['location_id', 'route_id'], lazy=False)
for g in rules_groups:
if not g.get('route_id'):
continue
orderpoints = self.filtered(lambda o: o.location_id.id == g['location_id'][0])
orderpoints.route_id = g['route_id']
def _get_product_context(self):
"""Used to call `virtual_available` when running an orderpoint."""
self.ensure_one()
return {
'location': self.location_id.id,
'to_date': datetime.combine(self.lead_days_date, time.max)
}
def _get_orderpoint_action(self):
"""Create manual orderpoints for missing product in each warehouses. It also removes
orderpoints that have been replenish. In order to do it:
- It uses the report.stock.quantity to find missing quantity per product/warehouse
- It checks if orderpoint already exist to refill this location.
- It checks if it exists other sources (e.g RFQ) tha refill the warehouse.
- It creates the orderpoints for missing quantity that were not refill by an upper option.
return replenish report ir.actions.act_window
"""
action = self.env["ir.actions.actions"]._for_xml_id("stock.action_orderpoint_replenish")
action['context'] = self.env.context
        # Also search archived orderpoints to avoid triggering the product_location_check SQL constraint later.
        # It means that when an archived orderpoint exists for a location + product, the replenishment
        # report won't take that location + product into account and won't create any manual orderpoint.
        # In master: the active field should be removed.
orderpoints = self.env['stock.warehouse.orderpoint'].with_context(active_test=False).search([])
# Remove previous automatically created orderpoint that has been refilled.
to_remove = orderpoints.filtered(lambda o: o.create_uid.id == SUPERUSER_ID and o.qty_to_order <= 0.0 and o.trigger == 'manual')
to_remove.unlink()
orderpoints = orderpoints - to_remove
to_refill = defaultdict(float)
all_product_ids = []
all_warehouse_ids = []
qty_by_product_warehouse = self.env['report.stock.quantity'].read_group(
[('date', '=', fields.date.today()), ('state', '=', 'forecast')],
['product_id', 'product_qty', 'warehouse_id'],
['product_id', 'warehouse_id'], lazy=False)
for group in qty_by_product_warehouse:
warehouse_id = group.get('warehouse_id') and group['warehouse_id'][0]
if group['product_qty'] >= 0.0 or not warehouse_id:
continue
all_product_ids.append(group['product_id'][0])
all_warehouse_ids.append(warehouse_id)
to_refill[(group['product_id'][0], warehouse_id)] = group['product_qty']
if not to_refill:
return action
        # Recompute the forecasted quantity for the missing products, still as of today
        # but using their real lead days.
key_to_remove = []
# group product by lead_days and warehouse in order to read virtual_available
# in batch
pwh_per_day = defaultdict(list)
for (product, warehouse), quantity in to_refill.items():
product = self.env['product.product'].browse(product).with_prefetch(all_product_ids)
warehouse = self.env['stock.warehouse'].browse(warehouse).with_prefetch(all_warehouse_ids)
rules = product._get_rules_from_location(warehouse.lot_stock_id)
lead_days = rules.with_context(bypass_delay_description=True)._get_lead_days(product)[0]
pwh_per_day[(lead_days, warehouse)].append(product.id)
for (days, warehouse), p_ids in pwh_per_day.items():
products = self.env['product.product'].browse(p_ids)
qties = products.with_context(
warehouse=warehouse.id,
to_date=fields.datetime.now() + relativedelta.relativedelta(days=days)
).read(['virtual_available'])
for qty in qties:
if float_compare(qty['virtual_available'], 0, precision_rounding=product.uom_id.rounding) >= 0:
key_to_remove.append((qty['id'], warehouse.id))
else:
to_refill[(qty['id'], warehouse.id)] = qty['virtual_available']
for key in key_to_remove:
del to_refill[key]
if not to_refill:
return action
        # Remove incoming quantities from origins other than moves (e.g. RFQs)
product_ids, warehouse_ids = zip(*to_refill)
dummy, qty_by_product_wh = self.env['product.product'].browse(product_ids)._get_quantity_in_progress(warehouse_ids=warehouse_ids)
rounding = self.env['decimal.precision'].precision_get('Product Unit of Measure')
# Group orderpoint by product-warehouse
orderpoint_by_product_warehouse = self.env['stock.warehouse.orderpoint'].read_group(
[('id', 'in', orderpoints.ids)],
['product_id', 'warehouse_id', 'qty_to_order:sum'],
['product_id', 'warehouse_id'], lazy=False)
orderpoint_by_product_warehouse = {
(record.get('product_id')[0], record.get('warehouse_id')[0]): record.get('qty_to_order')
for record in orderpoint_by_product_warehouse
}
for (product, warehouse), product_qty in to_refill.items():
qty_in_progress = qty_by_product_wh.get((product, warehouse)) or 0.0
qty_in_progress += orderpoint_by_product_warehouse.get((product, warehouse), 0.0)
            # Add the qty to order from other orderpoints under this warehouse.
if not qty_in_progress:
continue
to_refill[(product, warehouse)] = product_qty + qty_in_progress
to_refill = {k: v for k, v in to_refill.items() if float_compare(
v, 0.0, precision_digits=rounding) < 0.0}
lot_stock_id_by_warehouse = self.env['stock.warehouse'].search_read([
('id', 'in', [g[1] for g in to_refill.keys()])
], ['lot_stock_id'])
lot_stock_id_by_warehouse = {w['id']: w['lot_stock_id'][0] for w in lot_stock_id_by_warehouse}
# With archived ones to avoid `product_location_check` SQL constraints
orderpoint_by_product_location = self.env['stock.warehouse.orderpoint'].with_context(active_test=False).read_group(
[('id', 'in', orderpoints.ids)],
['product_id', 'location_id', 'ids:array_agg(id)'],
['product_id', 'location_id'], lazy=False)
orderpoint_by_product_location = {
(record.get('product_id')[0], record.get('location_id')[0]): record.get('ids')[0]
for record in orderpoint_by_product_location
}
orderpoint_values_list = []
for (product, warehouse), product_qty in to_refill.items():
lot_stock_id = lot_stock_id_by_warehouse[warehouse]
orderpoint_id = orderpoint_by_product_location.get((product, lot_stock_id))
if orderpoint_id:
self.env['stock.warehouse.orderpoint'].browse(orderpoint_id).qty_forecast += product_qty
else:
orderpoint_values = self.env['stock.warehouse.orderpoint']._get_orderpoint_values(product, lot_stock_id)
orderpoint_values.update({
'name': _('Replenishment Report'),
'warehouse_id': warehouse,
'company_id': self.env['stock.warehouse'].browse(warehouse).company_id.id,
})
orderpoint_values_list.append(orderpoint_values)
orderpoints = self.env['stock.warehouse.orderpoint'].with_user(SUPERUSER_ID).create(orderpoint_values_list)
for orderpoint in orderpoints:
orderpoint.route_id = orderpoint.product_id.route_ids[:1]
orderpoints.filtered(lambda o: not o.route_id)._set_default_route_id()
return action
@api.model
def _get_orderpoint_values(self, product, location):
return {
'product_id': product,
'location_id': location,
'product_max_qty': 0.0,
'product_min_qty': 0.0,
'trigger': 'manual',
}
def _get_replenishment_order_notification(self):
return False
def _quantity_in_progress(self):
"""Return Quantities that are not yet in virtual stock but should be deduced from orderpoint rule
(example: purchases created from orderpoints)"""
return dict(self.mapped(lambda x: (x.id, 0.0)))
def _prepare_procurement_values(self, date=False, group=False):
""" Prepare specific key for moves or other components that will be created from a stock rule
comming from an orderpoint. This method could be override in order to add other custom key that could
be used in move/po creation.
"""
date_planned = date or fields.Date.today()
return {
'route_ids': self.route_id,
'date_planned': date_planned,
'date_deadline': date or False,
'warehouse_id': self.warehouse_id,
'orderpoint_id': self,
'group_id': group or self.group_id,
}
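    # Hypothetical override sketch (not part of this module) showing how an extending
    # module could add a custom key; the 'priority' key and the inheriting class below
    # are assumptions for illustration only:
    #
    #     class StockWarehouseOrderpoint(models.Model):
    #         _inherit = 'stock.warehouse.orderpoint'
    #
    #         def _prepare_procurement_values(self, date=False, group=False):
    #             values = super()._prepare_procurement_values(date=date, group=group)
    #             values['priority'] = '1'  # hypothetical extra key read at move/PO creation
    #             return values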
def _procure_orderpoint_confirm(self, use_new_cursor=False, company_id=None, raise_user_error=True):
""" Create procurements based on orderpoints.
:param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing
1000 orderpoints.
This is appropriate for batch jobs only.
"""
self = self.with_company(company_id)
orderpoints_noprefetch = self.read(['id'])
orderpoints_noprefetch = [orderpoint['id'] for orderpoint in orderpoints_noprefetch]
for orderpoints_batch in split_every(1000, orderpoints_noprefetch):
if use_new_cursor:
cr = registry(self._cr.dbname).cursor()
self = self.with_env(self.env(cr=cr))
orderpoints_batch = self.env['stock.warehouse.orderpoint'].browse(orderpoints_batch)
orderpoints_exceptions = []
while orderpoints_batch:
procurements = []
for orderpoint in orderpoints_batch:
if float_compare(orderpoint.qty_to_order, 0.0, precision_rounding=orderpoint.product_uom.rounding) == 1:
date = datetime.combine(orderpoint.lead_days_date, time.min)
values = orderpoint._prepare_procurement_values(date=date)
procurements.append(self.env['procurement.group'].Procurement(
orderpoint.product_id, orderpoint.qty_to_order, orderpoint.product_uom,
orderpoint.location_id, orderpoint.name, orderpoint.name,
orderpoint.company_id, values))
try:
with self.env.cr.savepoint():
self.env['procurement.group'].with_context(from_orderpoint=True).run(procurements, raise_user_error=raise_user_error)
except ProcurementException as errors:
for procurement, error_msg in errors.procurement_exceptions:
orderpoints_exceptions += [(procurement.values.get('orderpoint_id'), error_msg)]
failed_orderpoints = self.env['stock.warehouse.orderpoint'].concat(*[o[0] for o in orderpoints_exceptions])
if not failed_orderpoints:
_logger.error('Unable to process orderpoints')
break
orderpoints_batch -= failed_orderpoints
except OperationalError:
if use_new_cursor:
cr.rollback()
continue
else:
raise
else:
orderpoints_batch._post_process_scheduler()
break
# Log an activity on product template for failed orderpoints.
for orderpoint, error_msg in orderpoints_exceptions:
existing_activity = self.env['mail.activity'].search([
('res_id', '=', orderpoint.product_id.product_tmpl_id.id),
('res_model_id', '=', self.env.ref('product.model_product_template').id),
('note', '=', error_msg)])
if not existing_activity:
orderpoint.product_id.product_tmpl_id.activity_schedule(
'mail.mail_activity_data_warning',
note=error_msg,
user_id=orderpoint.product_id.responsible_id.id or SUPERUSER_ID,
)
if use_new_cursor:
cr.commit()
cr.close()
return {}
def _post_process_scheduler(self):
return True
|
rven/odoo
|
addons/stock/models/stock_orderpoint.py
|
Python
|
agpl-3.0
| 27,180
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-13 05:07
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tagging.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wolf', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('excerpt', models.TextField(blank=True)),
('body', models.TextField()),
('pub_date', models.DateTimeField(default=datetime.datetime.now)),
('slug', models.SlugField(unique_for_date='pub_date')),
('enable_comments', models.BooleanField(default=True)),
('featured', models.BooleanField(default=False)),
('status', models.IntegerField(choices=[(1, 'Live'), (2, 'Draft'), (3, 'Hidden')], default=1)),
('tags', tagging.fields.TagField(blank=True, default=b'', help_text='Separate tags with spaces.', max_length=255)),
('excerpt_html', models.TextField(blank=True, editable=False)),
('body_html', models.TextField(blank=True, editable=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-pub_date'],
'verbose_name_plural': 'Entries',
},
),
migrations.AlterModelOptions(
name='category',
options={'ordering': ['title'], 'verbose_name_plural': 'Categories'},
),
migrations.AlterField(
model_name='category',
name='slug',
field=models.SlugField(help_text='Suggested value automatically generated from title. Must be unique.', unique=True),
),
migrations.AlterField(
model_name='category',
name='title',
field=models.CharField(help_text='Maximum 250 characters.', max_length=250),
),
migrations.AddField(
model_name='entry',
name='categories',
field=models.ManyToManyField(to='wolf.Category'),
),
]
|
ericchan2012/django-cms
|
cms/wolf/migrations/0002_auto_20160613_1307.py
|
Python
|
gpl-3.0
| 2,534
|
from itertools import repeat
import numpy as np
from tensorflow import keras
import sys
sys.path.append("../../Lekhaka")
import telugu as lang
from Lekhaka import Scribe, Deformer, Noiser
from Lekhaka import DataGenerator as Gen
from lab.model_by_function import build_model
from banti2chamanti import banti2chamanti
import utils
# Initialize
from default_args import scribe_args, elastic_args, noise_args
alphabet_size = len(lang.symbols)
batch_size = 32
scriber = Scribe(lang, **scribe_args)
printer = utils.Printer(lang.symbols)
deformer = Deformer(**elastic_args)
noiser = Noiser(**noise_args)
gen = Gen(scriber, deformer, noiser, batch_size)
print(scriber)
# CRNN Params
convpool_specs, convpool_wts = banti2chamanti(sys.argv[1])
num_lstm_out = 66
# Model
model = build_model(alphabet_size, scribe_args["height"], scriber.width, batch_size,
gen.labelswidth, num_lstm_out, convpool_specs, convpool_wts)
feature_model = keras.models.Model(model.get_layer(name="image").input,
[model.get_layer(name="dense1").input,
model.get_layer(name="dense1").output])
model.summary()
feature_model.summary()
from PIL import Image
def as255(v):
return (255*(v-v.min())/(v.max()-v.min())).T.astype('uint8')
def repea8(a, times):
h, w = a.shape
b = np.vstack(repeat(a, times))
return b.T.reshape((w*times, h)).T
image, labels, image_lengths, label_lengths = gen.get()
cpfs, features = feature_model.predict(image)
_, fw, fh = features.shape
print(image.shape, cpfs.shape, features.shape)
for i in range(batch_size):
img = as255(1-image[i, :, :, 0])
cpf = repea8(as255(cpfs[i]), model.width_scaled_down_by)
feat = repea8(as255(features[i]), model.width_scaled_down_by)
img = np.vstack((img, cpf, feat))
Image.fromarray(img).save(f'features/{i}_aimg.png')
|
rakeshvar/chamanti_ocr
|
lab/show_features.py
|
Python
|
apache-2.0
| 1,882
|
import os
import re
import time
import logging
from autotest.client import utils
from autotest.client.shared import error
from virttest import virsh
from virttest import utils_libvirtd
from virttest import utils_config
from virttest import utils_misc
from virttest import utils_libguestfs
from virttest.libvirt_xml import vm_xml
from virttest.utils_test import libvirt
from virttest.staging.service import Factory
from virttest.staging.utils_memory import drop_caches
def run(test, params, env):
"""
Test command: virsh managedsave.
This command can save and destroy a
running domain, so it can be restarted
from the same state at a later time.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
# define function
def vm_recover_check(option, libvirtd, check_shutdown=False):
"""
Check if the vm can be recovered correctly.
        :param option: managedsave command option.
        :param libvirtd: libvirtd service handler.
        :param check_shutdown: whether to also check shutdown/start after recovery.
"""
        # The vm should have been shut down by managedsave at this point
if vm.is_alive():
raise error.TestFail("Guest should be inactive")
# Check vm managed save state.
ret = virsh.dom_list("--managed-save --inactive")
vm_state1 = re.findall(r".*%s.*" % vm_name,
ret.stdout.strip())[0].split()[2]
ret = virsh.dom_list("--managed-save --all")
vm_state2 = re.findall(r".*%s.*" % vm_name,
ret.stdout.strip())[0].split()[2]
if vm_state1 != "saved" or vm_state2 != "saved":
raise error.TestFail("Guest state should be saved")
virsh.start(vm_name)
        # After starting, the vm should be active
if vm.is_dead():
raise error.TestFail("Guest should be active")
# Restart libvirtd and check vm status again.
libvirtd.restart()
if vm.is_dead():
raise error.TestFail("Guest should be active after"
" restarting libvirtd")
# Check managed save file:
if os.path.exists(managed_save_file):
raise error.TestFail("Managed save image exist "
"after starting the domain")
if option:
if option.count("running"):
if vm.is_dead() or vm.is_paused():
raise error.TestFail("Guest state should be"
" running after started"
" because of '--running' option")
elif option.count("paused"):
if not vm.is_paused():
raise error.TestFail("Guest state should be"
" paused after started"
" because of '--paused' option")
else:
if params.get("paused_after_start_vm") == "yes":
if not vm.is_paused():
raise error.TestFail("Guest state should be"
" paused after started"
" because of initia guest state")
if check_shutdown:
# Resume the domain.
if vm.is_paused():
vm.resume()
vm.wait_for_login()
# Shutdown and start the domain,
            # it should be in running state and allow login.
vm.shutdown()
vm.wait_for_shutdown()
vm.start()
vm.wait_for_login()
def vm_undefine_check(vm_name):
"""
Check if vm can be undefined with manage-save option
"""
#backup xml file
xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
if not os.path.exists(managed_save_file):
raise error.TestFail("Can't find managed save image")
#undefine domain with no options.
if not virsh.undefine(vm_name, options=None,
ignore_status=True).exit_status:
raise error.TestFail("Guest shouldn't be undefined"
"while domain managed save image exists")
#undefine domain with managed-save option.
if virsh.undefine(vm_name, options="--managed-save",
ignore_status=True).exit_status:
raise error.TestFail("Guest can't be undefine with "
"managed-save option")
if os.path.exists(managed_save_file):
raise error.TestFail("Managed save image exists"
" after undefining vm")
#restore and start the vm.
xml_backup.define()
vm.start()
def check_flags_parallel(virsh_cmd, bash_cmd, flags):
"""
Run the commands parallel and check the output.
"""
cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
output = utils.run(cmd, ignore_status=True).stdout.strip()
logging.debug("check flags output: %s" % output)
lines = re.findall(r"flags:.+%s" % flags, output, re.M)
logging.debug("Find lines: %s" % lines)
if not lines:
raise error.TestFail("Checking flags %s failed" % flags)
def check_multi_guests(guests, start_delay, libvirt_guests):
"""
Check start_delay option for multiple guests.
"""
# Destroy vm first
if vm.is_alive():
vm.destroy(gracefully=False)
# Clone given number of guests
timeout = params.get("clone_timeout", 360)
for i in range(int(guests)):
dst_vm = "%s_%s" % (vm_name, i)
utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
True, timeout=timeout)
virsh.start(dst_vm)
# Wait 10 seconds for vm to start
time.sleep(10)
libvirt_guests.restart()
        # The libvirt-guests status command reads messages from the systemd
        # journal; in case the messages are not ready in time,
        # add a short wait here.
def wait_func():
return not utils.run("service libvirt-guests status"
" | grep 'Resuming guest'",
ignore_status=True).exit_status
utils_misc.wait_for(wait_func, 5)
ret = utils.run("service libvirt-guests status",
ignore_status=True)
logging.info("status output: %s", ret.stdout)
pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
resume_time = re.findall(pattern, ret.stdout, re.M)
if not resume_time:
raise error.TestFail("Can't see messages of resuming guest")
# Convert time string to int
resume_seconds = [time.mktime(time.strptime(
tm, "%b %y %H:%M:%S")) for tm in resume_time]
logging.info("Resume time in seconds: %s", resume_seconds)
# Check if start_delay take effect
for i in range(len(resume_seconds)-1):
if resume_seconds[i+1] - resume_seconds[i] < int(start_delay):
raise error.TestFail("Checking start_delay failed")
def wait_for_state(vm_state):
"""
Wait for vm state is ready.
"""
utils_misc.wait_for(lambda: vm.state() == vm_state, 10)
def check_guest_flags(bash_cmd, flags):
"""
Check bypass_cache option for single guest.
"""
# Drop caches.
drop_caches()
virsh_cmd = "service libvirt-guests stop"
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"1", flags), flags)
ret = utils.run("service libvirt-guests status",
ignore_status=True)
logging.info("status output: %s", ret.stdout)
if any(["Suspending %s" % vm_name not in ret.stdout,
"stopped, with saved guests" not in ret.stdout]):
raise error.TestFail("Can't see messages of suspending vm")
# status command should return 3.
if ret.exit_status != 3:
raise error.TestFail("The exit code %s for libvirt-guests"
" status is not correct" % ret)
# Wait for VM in shut off state
wait_for_state("shut off")
virsh_cmd = "service libvirt-guests start"
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0", flags), flags)
# Wait for VM in running state
wait_for_state("running")
def vm_msave_remove_check(vm_name):
"""
Check managed save remove command.
"""
if not os.path.exists(managed_save_file):
raise error.TestFail("Can't find managed save image")
virsh.managedsave_remove(vm_name)
if os.path.exists(managed_save_file):
raise error.TestFail("Managed save image still exists")
virsh.start(vm_name)
# The domain state should be running
if vm.state() != "running":
raise error.TestFail("Guest state should be"
" running after started")
def vm_managedsave_loop(vm_name, loop_range, libvirtd):
"""
Run a loop of managedsave command and check its result.
"""
if vm.is_dead():
virsh.start(vm_name)
for i in range(int(loop_range)):
logging.debug("Test loop: %s" % i)
virsh.managedsave(vm_name)
virsh.start(vm_name)
# Check libvirtd status.
if not libvirtd.is_running():
raise error.TestFail("libvirtd is stopped after cmd")
# Check vm status.
if vm.state() != "running":
raise error.TestFail("Guest isn't in running state")
def build_vm_xml(vm_name, **dargs):
"""
Build the new domain xml and define it.
"""
try:
# stop vm before doing any change to xml
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
if dargs.get("cpu_mode"):
if "cpu" in vmxml:
del vmxml.cpu
cpuxml = vm_xml.VMCPUXML()
cpuxml.mode = params.get("cpu_mode", "host-model")
cpuxml.match = params.get("cpu_match", "exact")
cpuxml.fallback = params.get("cpu_fallback", "forbid")
cpu_topology = {}
cpu_topology_sockets = params.get("cpu_topology_sockets")
if cpu_topology_sockets:
cpu_topology["sockets"] = cpu_topology_sockets
cpu_topology_cores = params.get("cpu_topology_cores")
if cpu_topology_cores:
cpu_topology["cores"] = cpu_topology_cores
cpu_topology_threads = params.get("cpu_topology_threads")
if cpu_topology_threads:
cpu_topology["threads"] = cpu_topology_threads
if cpu_topology:
cpuxml.topology = cpu_topology
vmxml.cpu = cpuxml
if dargs.get("sec_driver"):
seclabel_dict = {"type": "dynamic", "model": "selinux",
"relabel": "yes"}
vmxml.set_seclabel([seclabel_dict])
vmxml.sync()
vm.start()
except Exception, e:
logging.error(str(e))
raise error.TestNAError("Build domain xml failed")
status_error = ("yes" == params.get("status_error", "no"))
vm_ref = params.get("managedsave_vm_ref", "name")
libvirtd_state = params.get("libvirtd", "on")
extra_param = params.get("managedsave_extra_param", "")
progress = ("yes" == params.get("managedsave_progress", "no"))
cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
test_undefine = "yes" == params.get("managedsave_undefine", "no")
test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
autostart_bypass_cache = params.get("autostart_bypass_cache", "")
multi_guests = params.get("multi_guests", "")
test_libvirt_guests = params.get("test_libvirt_guests", "")
check_flags = "yes" == params.get("check_flags", "no")
security_driver = params.get("security_driver", "")
remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
option = params.get("managedsave_option", "")
check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
pre_vm_state = params.get("pre_vm_state", "")
move_saved_file = "yes" == params.get("move_saved_file", "no")
test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
if option:
if not virsh.has_command_help_match('managedsave', option):
# Older libvirt does not have this option
raise error.TestNAError("Older libvirt does not"
" handle arguments consistently")
# Backup xml file.
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
# Get the libvirtd service
libvirtd = utils_libvirtd.Libvirtd()
# Get config files.
qemu_config = utils_config.LibvirtQemuConfig()
libvirt_guests_config = utils_config.LibvirtGuestsConfig()
# Get libvirt-guests service
libvirt_guests = Factory.create_service("libvirt-guests")
try:
# Destroy vm first for setting configuration file
if vm.state() == "running":
vm.destroy(gracefully=False)
# Prepare test environment.
if libvirtd_state == "off":
libvirtd.stop()
if autostart_bypass_cache:
ret = virsh.autostart(vm_name, "", ignore_status=True)
libvirt.check_exit_status(ret)
qemu_config.auto_start_bypass_cache = autostart_bypass_cache
libvirtd.restart()
if security_driver:
qemu_config.security_driver = [security_driver]
if test_libvirt_guests:
if multi_guests:
start_delay = params.get("start_delay", "20")
libvirt_guests_config.START_DELAY = start_delay
if check_flags:
libvirt_guests_config.BYPASS_CACHE = "1"
# The config file format should be "x=y" instead of "x = y"
utils.run("sed -i -e 's/ = /=/g' "
"/etc/sysconfig/libvirt-guests")
libvirt_guests.restart()
# Change domain xml.
if cpu_mode:
build_vm_xml(vm_name, cpu_mode=True)
if security_driver:
build_vm_xml(vm_name, sec_driver=True)
# Turn VM into certain state.
if pre_vm_state == "transient":
logging.info("Creating %s..." % vm_name)
vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
if vm.is_alive():
vm.destroy(gracefully=False)
vm.undefine()
if virsh.create(vmxml_for_test.xml, ignore_status=True).exit_status:
vmxml_backup.define()
raise error.TestNAError("Cann't create the domain")
# Wait for vm in stable state
if params.get("start_vm") == "yes":
if vm.state() == "shut off":
vm.start()
vm.wait_for_login()
# run test case
domid = vm.get_id()
domuuid = vm.get_uuid()
if vm_ref == "id":
vm_ref = domid
elif vm_ref == "uuid":
vm_ref = domuuid
elif vm_ref == "hex_id":
vm_ref = hex(int(domid))
elif vm_ref.count("invalid"):
vm_ref = params.get(vm_ref)
elif vm_ref == "name":
vm_ref = vm_name
# Ignore exception with "ignore_status=True"
if progress:
option += " --verbose"
option += extra_param
# For bypass_cache test. Run a shell command to check fd flags while
    # executing the managedsave command
bash_cmd = ("let i=1; while((i++<200)); do if [ -e %s ]; then (cat /proc"
"/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/*%s* |"
"grep 'flags:.*%s') && break; else sleep 0.1; fi; done;")
    # Flags used to check that bypass_cache takes effect
flags = "014"
if test_bypass_cache:
# Drop caches.
drop_caches()
virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"1", flags), flags)
# Wait for VM in shut off state
wait_for_state("shut off")
virsh_cmd = "virsh start %s %s" % (option, vm_name)
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0", flags), flags)
# Wait for VM in running state
wait_for_state("running")
elif test_libvirt_guests:
logging.debug("libvirt-guests status: %s", libvirt_guests.status())
if multi_guests:
check_multi_guests(multi_guests,
start_delay, libvirt_guests)
if check_flags:
check_guest_flags(bash_cmd, flags)
else:
ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
status = ret.exit_status
        # The progress information is output in the error message
error_msg = ret.stderr.strip()
if move_saved_file:
cmd = "echo > %s" % managed_save_file
utils.run(cmd)
# recover libvirtd service start
if libvirtd_state == "off":
libvirtd.start()
if status_error:
if not status:
raise error.TestFail("Run successfully with wrong command!")
else:
if status:
raise error.TestFail("Run failed with right command")
if progress:
if not error_msg.count("Managedsave:"):
raise error.TestFail("Got invalid progress output")
if remove_after_cmd:
vm_msave_remove_check(vm_name)
elif test_undefine:
vm_undefine_check(vm_name)
elif autostart_bypass_cache:
libvirtd.stop()
virsh_cmd = ("(service libvirtd start)")
check_flags_parallel(virsh_cmd, bash_cmd %
(managed_save_file, managed_save_file,
"0", flags), flags)
elif test_loop_cmd:
loop_range = params.get("loop_range", "20")
vm_managedsave_loop(vm_name, loop_range, libvirtd)
else:
vm_recover_check(option, libvirtd, check_shutdown)
finally:
# Restore test environment.
if vm.is_paused():
virsh.resume(vm_name)
elif vm.is_dead():
vm.start()
# Wait for VM in running state
wait_for_state("running")
if autostart_bypass_cache:
virsh.autostart(vm_name, "--disable",
ignore_status=True)
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml_backup.sync()
if multi_guests:
for i in range(int(multi_guests)):
virsh.remove_domain("%s_%s" % (vm_name, i),
"--remove-all-storage")
qemu_config.restore()
libvirt_guests_config.restore()
libvirtd.restart()
|
will-Do/tp-libvirt_v2v
|
libvirt/tests/src/virsh_cmd/domain/virsh_managedsave.py
|
Python
|
gpl-2.0
| 19,904
|
# dog http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n02084071
# cat
import shutil
import requests
import uuid
import os
dog_url="http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n02084071"
dog_response=requests.get(dog_url, stream=True)
dogs=dog_response.text.split()
for url in dogs:
response = requests.get(url, stream=True)
file_name= '{}.jpg'.format(uuid.uuid4())
with open( os.path.join("dataset","dog", file_name), 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
|
wasit7/cs634
|
2017/cs634_project/image_loader.py
|
Python
|
bsd-2-clause
| 537
|
#! /usr/bin/env python
import sys
g = {}
n = {}
for line in sys.stdin:
(n1, n2, p, q, t, tg, x) = line.strip().split(' ')
t = int(t)
x = float(x)
key = ' '.join((n1,n2,p,q))
if not key in n:
n[key] = 0
g[key] = 0
n[key] += t
g[key] += x*t
for key in n:
print key, n[key], g[key]/n[key]
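# Illustrative input/output (field meanings assumed from the code above): a stdin line
# such as "10 10 0.5 0.5 3 tag 1.2" contributes t=3 samples with value x=1.2 to the key
# "10 10 0.5 0.5"; for each key the script prints the total sample count and the
# t-weighted mean of x.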
|
vbeffara/Simulations
|
tools/massage-box.py
|
Python
|
gpl-3.0
| 341
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class TriggerPaged(Paged):
"""
    A paging container for iterating over a list of :class:`Trigger <azure.mgmt.edgegateway.models.Trigger>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Trigger]'}
}
def __init__(self, *args, **kwargs):
super(TriggerPaged, self).__init__(*args, **kwargs)
|
Azure/azure-sdk-for-python
|
sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/trigger_paged.py
|
Python
|
mit
| 919
|
# Copyright (c) 2016 PyWPS Project Steering Committee
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pywps import Process, LiteralInput, LiteralOutput
from pywps.app.Common import Metadata
class Sleep(Process):
SUCCESS_MESSAGE = 'done sleeping'
def __init__(self):
inputs = [LiteralInput('delay',
'Delay between every update',
data_type='float')]
outputs = [LiteralOutput('sleep_output',
'Sleep Output',
data_type='string')]
super(Sleep, self).__init__(
self._handler,
identifier='sleep',
version='None',
title='Sleep Process',
abstract="The process will sleep for a given delay \
or 10 seconds if not a valid value",
profile='',
metadata=[Metadata('Sleep'), Metadata('Wait'), Metadata('Delay')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
import time
sleep_delay = request.inputs['delay'][0].data
if sleep_delay:
sleep_delay = float(sleep_delay)
else:
sleep_delay = 10
time.sleep(sleep_delay)
response.update_status('PyWPS Process started. Waiting...', 20)
time.sleep(sleep_delay)
response.update_status('PyWPS Process started. Waiting...', 40)
time.sleep(sleep_delay)
response.update_status('PyWPS Process started. Waiting...', 60)
time.sleep(sleep_delay)
response.update_status('PyWPS Process started. Waiting...', 80)
time.sleep(sleep_delay)
response.outputs['sleep_output'].data = self.SUCCESS_MESSAGE
return response
def main():
"""Example of how to debug this process, running outside a PyWPS instance.
"""
sleep = Sleep()
(request, response) = sleep.build_request_response()
literal_in = sleep.inputs[0]
literal_in.data = 10
request.inputs["delay"].append(literal_in)
sleep._handler(request, response)
assert response.outputs["sleep_output"].data == sleep.SUCCESS_MESSAGE
print("All good!")
if __name__ == "__main__":
main()
|
PyWPS/pywps-4-demo
|
processes/sleep.py
|
Python
|
mit
| 3,360
|
# Weights layers
from .cauchysimilarity import CauchySimilarity
from .dropout import Dropout
from .embed import Embed
from .expand_window import expand_window
from .hashembed import HashEmbed
from .layernorm import LayerNorm
from .linear import Linear
from .lstm import LSTM, PyTorchLSTM
from .logistic import Logistic
from .maxout import Maxout
from .mish import Mish
from .multisoftmax import MultiSoftmax
from .parametricattention import ParametricAttention
from .pytorchwrapper import PyTorchWrapper, PyTorchWrapper_v2
from .pytorchwrapper import PyTorchRNNWrapper
from .relu import Relu
from .resizable import resizable
from .sigmoid_activation import sigmoid_activation
from .sigmoid import Sigmoid
from .softmax_activation import softmax_activation
from .softmax import Softmax
from .sparselinear import SparseLinear
from .tensorflowwrapper import TensorFlowWrapper, keras_subclass
from .mxnetwrapper import MXNetWrapper
# Combinators
from .add import add
from .bidirectional import bidirectional
from .chain import chain
from .clone import clone
from .concatenate import concatenate
from .map_list import map_list
from .noop import noop
from .residual import residual
from .uniqued import uniqued
from .siamese import siamese
from .tuplify import tuplify
# Pooling
from .reduce_first import reduce_first
from .reduce_last import reduce_last
from .reduce_max import reduce_max
from .reduce_mean import reduce_mean
from .reduce_sum import reduce_sum
# Array manipulation
from .array_getitem import array_getitem
# Data-type transfers
from .list2array import list2array
from .list2ragged import list2ragged
from .list2padded import list2padded
from .ragged2list import ragged2list
from .padded2list import padded2list
from .remap_ids import remap_ids
from .strings2arrays import strings2arrays
from .with_array import with_array
from .with_array2d import with_array2d
from .with_cpu import with_cpu
from .with_flatten import with_flatten
from .with_padded import with_padded
from .with_list import with_list
from .with_ragged import with_ragged
from .with_reshape import with_reshape
from .with_getitem import with_getitem
from .with_debug import with_debug
from .with_nvtx_range import with_nvtx_range
__all__ = [
"CauchySimilarity",
"Linear",
"Dropout",
"Embed",
"expand_window",
"HashEmbed",
"LayerNorm",
"LSTM",
"Maxout",
"Mish",
"MultiSoftmax",
"ParametricAttention",
"PyTorchLSTM",
"PyTorchWrapper",
"PyTorchWrapper_v2",
"PyTorchRNNWrapper",
"Relu",
"sigmoid_activation",
"Sigmoid"
"softmax_activation",
"Softmax",
"SparseLinear",
"TensorFlowWrapper",
"add",
"bidirectional",
"chain",
"clone",
"concatenate",
"noop",
"residual",
"uniqued",
"siamese",
"reduce_first",
"reduce_last",
"reduce_max",
"reduce_mean",
"reduce_sum",
"resizable",
"list2array",
"list2ragged",
"list2padded",
"ragged2list",
"padded2list",
"with_reshape",
"with_getitem",
"with_array",
"with_array2d",
"with_cpu",
"with_list",
"with_ragged",
"with_padded",
"with_flatten",
"with_debug",
"with_nvtx_range",
"remap_ids",
]
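# Hypothetical usage sketch (not part of this module): the combinators above compose
# individual layers into a single Model, e.g.
#
#     from thinc.api import chain, Relu, Softmax
#     model = chain(Relu(nO=64), Softmax())
#
# The exact layer signatures are assumed from the v8 API and may differ between versions.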
|
spacy-io/thinc
|
thinc/layers/__init__.py
|
Python
|
mit
| 3,230
|
# Standard Library Imports
import logging
# 3rd Party Imports
from twilio.rest import TwilioRestClient
# Local Imports
from PokeAlarm.Alarms import Alarm
from PokeAlarm.Utils import parse_boolean, require_and_remove_key, \
reject_leftover_parameters
log = logging.getLogger('Twilio')
try_sending = Alarm.try_sending
replace = Alarm.replace
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ATTENTION! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ONLY EDIT THIS FILE IF YOU KNOW WHAT YOU ARE DOING!
# You DO NOT NEED to edit this file to customize messages! Please ONLY EDIT
# the 'alarms.json'. Failing to do so can cause other features to break!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ATTENTION! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
class TwilioAlarm(Alarm):
_defaults = {
'monsters': {
'message': "A wild <mon_name> has appeared! <gmaps> "
"Available until <24h_time> (<time_left>)."
},
'stops': {
'message': "Someone has placed a lure on a Pokestop! <gmaps>"
"Lure will expire at <24h_time> (<time_left>)."
},
'gyms': {
'message': "A Team <old_team> gym has fallen! <gmaps>"
"It is now controlled by <new_team>."
},
'eggs': {
'message': "A level <egg_lvl> raid is incoming! <gmaps>"
"Egg hatches <24h_hatch_time> (<hatch_time_left>)."
},
'raids': {
'message': "Level <raid_lvl> raid against <mon_name>! <gmaps>"
" Available until <24h_raid_end> (<raid_time_left>)."
}
}
# Gather settings and create alarm
def __init__(self, settings):
# Required Parameters
self.__account_sid = require_and_remove_key(
'account_sid', settings, "'Twilio' type alarms.")
self.__auth_token = require_and_remove_key(
'auth_token', settings, "'Twilio' type alarms.")
self.__from_number = require_and_remove_key(
'from_number', settings, "'Twilio' type alarms.")
self.__to_number = require_and_remove_key(
'to_number', settings, "'Twilio' type alarms.")
self.__client = None
# Optional Alarm Parameters
self.__startup_message = parse_boolean(
settings.pop('startup_message', "True"))
# Optional Alert Parameters
self.__pokemon = self.set_alert(
settings.pop('monsters', {}), self._defaults['monsters'])
self.__pokestop = self.set_alert(
settings.pop('stops', {}), self._defaults['stops'])
self.__gym = self.set_alert(
settings.pop('gyms', {}), self._defaults['gyms'])
self.__egg = self.set_alert(
settings.pop('eggs', {}), self._defaults['eggs'])
self.__raid = self.set_alert(
settings.pop('raids', {}), self._defaults['raids'])
# Warn user about leftover parameters
reject_leftover_parameters(settings, "'Alarm level in Twilio alarm.")
log.info("Twilio Alarm has been created!")
# (Re)establishes Twilio connection
def connect(self):
self.__client = TwilioRestClient(self.__account_sid, self.__auth_token)
# Send a message letting the channel know that this alarm started
def startup_message(self):
if self.__startup_message:
self.send_sms(
to_num=self.__to_number,
from_num=self.__from_number,
body="PokeAlarm activated!"
)
log.info("Startup message sent!")
# Set the appropriate settings for each alert
def set_alert(self, settings, default):
alert = {
'to_number': settings.pop('to_number', self.__to_number),
'from_number': settings.pop('from_number', self.__from_number),
'message': settings.pop('message', default['message'])
}
reject_leftover_parameters(settings, "'Alert level in Twilio alarm.")
return alert
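    # Illustrative sketch of the matching 'alarms.json' entry (the field names mirror
    # the parameters consumed in __init__ and set_alert; the surrounding layout, the
    # "type" key and the placeholder values are assumptions, not the authoritative
    # schema):
    #
    #     {
    #         "type": "twilio",
    #         "account_sid": "<YOUR_ACCOUNT_SID>",
    #         "auth_token": "<YOUR_AUTH_TOKEN>",
    #         "from_number": "+15551230000",
    #         "to_number": "+15554560000",
    #         "monsters": {"message": "A wild <mon_name> has appeared! <gmaps>"}
    #     }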
# Send Pokemon Info
def send_alert(self, alert, info):
self.send_sms(
to_num=alert['to_number'],
from_num=alert['from_number'],
body=replace(alert['message'], info)
)
# Trigger an alert based on Pokemon info
def pokemon_alert(self, pokemon_info):
self.send_alert(self.__pokemon, pokemon_info)
# Trigger an alert based on Pokestop info
def pokestop_alert(self, pokestop_info):
self.send_alert(self.__pokestop, pokestop_info)
# Trigger an alert based on Gym info
def gym_alert(self, gym_info):
self.send_alert(self.__gym, gym_info)
# Trigger an alert when a raid egg has spawned (UPCOMING raid event)
def raid_egg_alert(self, raid_info):
self.send_alert(self.__egg, raid_info)
# Trigger an alert based on Raid info
def raid_alert(self, raid_info):
self.send_alert(self.__raid, raid_info)
# Send a SMS message
def send_sms(self, to_num, from_num, body):
if not isinstance(to_num, list):
to_num = [to_num]
for num in to_num:
args = {
'to': num,
'from_': from_num,
'body': body
}
try_sending(
log, self.connect, "Twilio",
self.__client.messages.create, args)
|
blsmit5728/PokeAlarm
|
PokeAlarm/Alarms/Twilio/TwilioAlarm.py
|
Python
|
agpl-3.0
| 5,378
|
from django.db import models
from django.utils import timezone
import json
CHARFIELD_LEN_SMALL=128
CHARFIELD_LEN_LONG = 1024
class CreatedModel(models.Model):
#When it was created/modified
modified=models.DateTimeField(auto_now=True)
created=models.DateTimeField(auto_now_add=True)
#Properties of the problem the model was created with
max_score=models.IntegerField()
prompt=models.TextField()
problem = models.ForeignKey("freeform_data.Problem")
target_number = models.IntegerField(default=0)
#Properties of the model file
model_relative_path=models.CharField(max_length=CHARFIELD_LEN_LONG)
model_full_path=models.CharField(max_length=CHARFIELD_LEN_LONG)
#Properties of the model itself
number_of_essays=models.IntegerField()
    # CV is cross-validation, a statistical technique that ensures the models are
    # trained on one part of the data and evaluated on another, so the kappa and
    # error measurements are not biased by the data that was used to create the
    # models (i.e. this is "true" error).
    # Kappa is inter-rater agreement; closer to 1 is better.
    # If the actual scores and the predicted scores agree perfectly, kappa will be 1.
cv_kappa=models.DecimalField(max_digits=10,decimal_places=9, default=1)
    # Mean absolute error is mean(abs(actual_score - predicted_score)).
    # A mean absolute error of .5 means that, on average, the predicted score is +/- .5 points from the actual score.
cv_mean_absolute_error=models.DecimalField(max_digits=15,decimal_places=10, default=1)
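    # Worked example for the error metric above (illustrative numbers only): with actual
    # scores [3, 2, 4] and predicted scores [3.5, 2.0, 3.0], the absolute errors are
    # [0.5, 0.0, 1.0], so the mean absolute error is 0.5, i.e. predictions are off by
    # half a point on average. cv_kappa, by contrast, would equal 1 only if the two
    # score lists agreed exactly.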
creation_succeeded=models.BooleanField(default=False)
creation_started =models.BooleanField(default=False)
#Amazon S3 stuff if we do use it
model_stored_in_s3=models.BooleanField(default=False)
s3_public_url=models.TextField(default="")
s3_bucketname=models.TextField(default="")
def get_submission_ids_used(self):
"""
Returns a list of submission ids of essays used to create the model.
Output:
Boolean success, list of ids/error message as appropriate
"""
try:
submission_id_list=json.loads(self.submission_ids_used)
except:
return False, "No essays used or not in json format."
return True, submission_id_list
|
vdt/ml-service-api
|
ml_grading/models.py
|
Python
|
agpl-3.0
| 2,344
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yevent.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
BdEINSALyon/yevent
|
manage.py
|
Python
|
gpl-3.0
| 804
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import sys
import re
from copy import copy
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestQueryMemLimitScaling(ImpalaTestSuite):
"""Test class to do functional validation of per query memory limits. """
QUERY = ["select * from lineitem where l_orderkey = -1",
"select min(l_orderkey) from lineitem",
"select * from lineitem order by l_orderkey limit 1"]
  # These queries take 400 MB-1 GB if no mem limits are set
MEM_LIMITS = ["-1", "400m", "150m"]
@classmethod
def get_workload(self):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestQueryMemLimitScaling, cls).add_test_dimensions()
# add mem_limit as a test dimension.
new_dimension = TestDimension('mem_limit', *TestQueryMemLimitScaling.MEM_LIMITS)
cls.TestMatrix.add_dimension(new_dimension)
if cls.exploration_strategy() != 'exhaustive':
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format in ['parquet'])
# Test running with different mem limits to exercise the dynamic memory
# scaling functionality.
def test_mem_usage_scaling(self, vector):
mem_limit = copy(vector.get_value('mem_limit'))
table_format = vector.get_value('table_format')
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = mem_limit
for query in self.QUERY:
self.execute_query(query, exec_options, table_format=table_format)
class TestExprMemUsage(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestExprMemUsage, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
if cls.exploration_strategy() != 'exhaustive':
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format in ['parquet'])
def test_scanner_mem_usage(self, vector):
exec_options = vector.get_value('exec_option')
    # This value was picked empirically based on the query.
exec_options['mem_limit'] = '300m'
self.execute_query_expect_success(self.client,
"select count(*) from lineitem where lower(l_comment) = 'hello'", exec_options,
table_format=vector.get_value('table_format'))
class TestMemLimitError(ImpalaTestSuite):
  # Different mem limit values, and the minimum mem limits with which the queries
  # are expected to run without problems.
MEM_IN_MB = [20, 50, 100, 115, 120, 125, 140, 145, 150]
MIN_MEM_FOR_TPCH_Q1 = 145
MIN_MEM_FOR_TPCH_Q4 = 150
EXPECTED_ERROR_MSG = "Memory limit exceeded"
@classmethod
def get_workload(self):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestMemLimitError, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(
TestDimension('mem_limit', *TestMemLimitError.MEM_IN_MB))
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format in ['parquet'])
@classmethod
def setup_class(cls):
super(TestMemLimitError, cls).setup_class()
cls.client.execute('compute stats tpch_parquet.lineitem');
cls.client.execute('compute stats tpch_parquet.orders');
def test_low_mem_limit_q1(self, vector):
mem = vector.get_value('mem_limit')
    # If the memory limit is larger than the minimum threshold, the query is not expected to fail
expects_error = mem < TestMemLimitError.MIN_MEM_FOR_TPCH_Q1;
new_vector = copy(vector)
new_vector.get_value('exec_option')['mem_limit'] = str(mem) + "m"
try:
self.run_test_case('tpch-q1', new_vector)
except ImpalaBeeswaxException as e:
if (expects_error == 0):
raise
if (TestMemLimitError.EXPECTED_ERROR_MSG in str(e)):
print str(e)
assert TestMemLimitError.EXPECTED_ERROR_MSG in str(e)
def test_low_mem_limit_q4(self, vector):
mem = vector.get_value('mem_limit')
    # If the memory limit is larger than the minimum threshold, the query is not expected to fail
expects_error = mem < TestMemLimitError.MIN_MEM_FOR_TPCH_Q4;
new_vector = copy(vector)
new_vector.get_value('exec_option')['mem_limit'] = str(mem) + "m"
try:
self.run_test_case('tpch-q4', new_vector)
except ImpalaBeeswaxException as e:
if (expects_error == 0):
raise
if (TestMemLimitError.EXPECTED_ERROR_MSG in str(e)):
print str(e)
assert TestMemLimitError.EXPECTED_ERROR_MSG in str(e)
|
grundprinzip/Impala
|
tests/query_test/test_mem_usage_scaling.py
|
Python
|
apache-2.0
| 5,147
|
import unittest
from solution import *
class Test_228_Easy(unittest.TestCase):
def test_order(self):
self.assertEqual(order("billowy"), "IN ORDER")
self.assertEqual(order("defaced"), "NOT IN ORDER")
self.assertEqual(order("sponged"), "REVERSE ORDER")
if __name__ == "__main__":
unittest.main()
|
marcardioid/DailyProgrammer
|
solutions/228_Easy/test_solution.py
|
Python
|
mit
| 327
|
#!/usr/bin/python
import gst_player
import clock
import time
import gobject
import glob
import os
import random
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# some global variables, will be moved elsewhere in the future probably
SUPPORTED_AUDIO_FORMATS = ['.mp3', '.ogg', '.flac']
MUSIC_FOLDER = './music'
def generate_random_music_file_list(music_folder):
try:
music_file_list = []
for filename in glob.glob(os.path.join(music_folder, '*.*')):
if os.path.splitext(filename)[1] in SUPPORTED_AUDIO_FORMATS:
#music_file_list.append(os.path.realpath(filename))
# insert the files on random positions
music_file_list.insert(random.randrange(len(music_file_list)+1), os.path.realpath(filename))
return music_file_list
except IOError as e:
        print 'Error when generating the music file list from the directory {0}, {1}'.format(MUSIC_FOLDER, e)
except Exception, e:
pass
def main():
# the main part of the program
# # Get audio files from the music folder in random order
# music_files = generate_random_music_file_list(MUSIC_FOLDER)
# print music_files
#player = gst_player.Player(music_files)
#player.play()
# for vol_lvl in range(1, 20):
# player.set_volume(vol_lvl*0.1)
# time.sleep(2)
# initialize the led clock
alarm_clock = clock.LedAlarmClock()
# set up some alarm
print 'Alarm ringing:', alarm_clock.check_if_alarm_on()
print 'Alarm enabled', alarm_clock.is_alarm_active()
# define some callbacks
    # the callback which reacts on the alarm
def clock_alarm_callback(channel):
print 'Alarm ringing:', alarm_clock.check_if_alarm_on()
print 'Alarm enabled', alarm_clock.is_alarm_active()
alarm_clock.on_alarm()
    # the callback which reacts on the clock pulse
def clock_pulse_callback(channel):
#print "Clock tick"
alarm_clock.update_display()
# assign the callbacks to the GPIO events
GPIO.add_event_detect(4, GPIO.FALLING, callback=clock_alarm_callback, bouncetime=100)
GPIO.add_event_detect(17, GPIO.FALLING, callback=clock_pulse_callback, bouncetime=100)
try:
h, m = input('Enter the alarm hour and minute (with a comma in between hh,mm): ')
alarm_clock.set_alarm_time(h, m)
print 'Daily alarm set for {0}:{1}'.format(h, m)
alarm_clock.activate_alarm()
print 'Alarm enabled', alarm_clock.is_alarm_active()
raw_input('Press Enter to exit\n>')
#loop = gobject.MainLoop()
#player.set_loop(loop)
#loop.run()
except KeyboardInterrupt:
GPIO.cleanup() # clean up GPIO on CTRL+C exit
#player.set_volume(0.,1)
#Execution starts here
if __name__ == '__main__':
main()
print 'END!'
GPIO.cleanup()
|
karlitos/Raspberry-Wake-Up-Light
|
main.py
|
Python
|
mit
| 2,963
|
import nose
from nose.plugins.attrib import attr
import logging
import colorguard
import os
bin_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries'))
@attr(speed='slow')
def test_cromu_00070_caching():
# Test exploitation of CROMU_00070 given an input which causes a leak. Then test that we can do it again restoring
# from the cache.
for _ in range(2):
payload = bytes.fromhex("06000006020a00000000000000000000000c030c00000100e1f505000000000000eb")
cg = colorguard.ColorGuard(os.path.join(bin_location, "tests/cgc/CROMU_00070"), payload)
pov = cg.attempt_exploit()
nose.tools.assert_not_equal(pov, None)
nose.tools.assert_true(pov.test_binary())
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("colorguard").setLevel("DEBUG")
logging.getLogger("povsim").setLevel("DEBUG")
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
mechaphish/colorguard
|
tests/test_cromu70_caching.py
|
Python
|
bsd-2-clause
| 1,245
|
"""Support for MyQ-Enabled Garage Doors."""
import logging
import time
from pymyq.const import (
DEVICE_STATE as MYQ_DEVICE_STATE,
DEVICE_STATE_ONLINE as MYQ_DEVICE_STATE_ONLINE,
DEVICE_TYPE as MYQ_DEVICE_TYPE,
DEVICE_TYPE_GATE as MYQ_DEVICE_TYPE_GATE,
KNOWN_MODELS,
MANUFACTURER,
)
import voluptuous as vol
from homeassistant.components.cover import (
DEVICE_CLASS_GARAGE,
DEVICE_CLASS_GATE,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_PASSWORD,
CONF_TYPE,
CONF_USERNAME,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPENING,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import async_call_later
from .const import (
DOMAIN,
MYQ_COORDINATOR,
MYQ_GATEWAY,
MYQ_TO_HASS,
TRANSITION_COMPLETE_DURATION,
TRANSITION_START_DURATION,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
# This parameter is no longer used; keeping it to avoid a breaking change in
# a hotfix, but in a future main release, this should be removed:
vol.Optional(CONF_TYPE): cv.string,
},
)
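# Illustrative legacy YAML configuration handled by the schema above (values are
# placeholders; YAML set up this way is only imported into a config entry):
#
#     cover:
#       - platform: myq
#         username: you@example.com
#         password: YOUR_PASSWORD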
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the platform."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: config[CONF_USERNAME],
CONF_PASSWORD: config[CONF_PASSWORD],
},
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up mysq covers."""
data = hass.data[DOMAIN][config_entry.entry_id]
myq = data[MYQ_GATEWAY]
coordinator = data[MYQ_COORDINATOR]
async_add_entities(
[MyQDevice(coordinator, device) for device in myq.covers.values()], True
)
class MyQDevice(CoverEntity):
"""Representation of a MyQ cover."""
def __init__(self, coordinator, device):
"""Initialize with API object, device id."""
self._coordinator = coordinator
self._device = device
self._last_action_timestamp = 0
self._scheduled_transition_update = None
@property
def device_class(self):
"""Define this cover as a garage door."""
device_type = self._device.device_json.get(MYQ_DEVICE_TYPE)
if device_type is not None and device_type == MYQ_DEVICE_TYPE_GATE:
return DEVICE_CLASS_GATE
return DEVICE_CLASS_GARAGE
@property
def name(self):
"""Return the name of the garage door if any."""
return self._device.name
@property
def available(self):
"""Return if the device is online."""
if not self._coordinator.last_update_success:
return False
        # Not all devices report online, so assume True if it's missing
return self._device.device_json[MYQ_DEVICE_STATE].get(
MYQ_DEVICE_STATE_ONLINE, True
)
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSED
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSING
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return MYQ_TO_HASS.get(self._device.state) == STATE_OPENING
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._device.device_id
async def async_close_cover(self, **kwargs):
"""Issue close command to cover."""
self._last_action_timestamp = time.time()
await self._device.close()
self._async_schedule_update_for_transition()
async def async_open_cover(self, **kwargs):
"""Issue open command to cover."""
self._last_action_timestamp = time.time()
await self._device.open()
self._async_schedule_update_for_transition()
@callback
def _async_schedule_update_for_transition(self):
self.async_write_ha_state()
# Cancel any previous updates
if self._scheduled_transition_update:
self._scheduled_transition_update()
# Schedule an update for when we expect the transition
# to be completed so the garage door or gate does not
# seem like its closing or opening for a long time
self._scheduled_transition_update = async_call_later(
self.hass,
TRANSITION_COMPLETE_DURATION,
self._async_complete_schedule_update,
)
async def _async_complete_schedule_update(self, _):
"""Update status of the cover via coordinator."""
self._scheduled_transition_update = None
await self._coordinator.async_request_refresh()
async def async_update(self):
"""Update status of cover."""
await self._coordinator.async_request_refresh()
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(DOMAIN, self._device.device_id)},
"name": self._device.name,
"manufacturer": MANUFACTURER,
"sw_version": self._device.firmware_version,
}
model = KNOWN_MODELS.get(self._device.device_id[2:4])
if model:
device_info["model"] = model
if self._device.parent_device_id:
device_info["via_device"] = (DOMAIN, self._device.parent_device_id)
return device_info
@callback
def _async_consume_update(self):
if time.time() - self._last_action_timestamp <= TRANSITION_START_DURATION:
# If we just started a transition we need
# to prevent a bouncy state
return
self.async_write_ha_state()
@property
def should_poll(self):
"""Return False, updates are controlled via coordinator."""
return False
async def async_added_to_hass(self):
"""Subscribe to updates."""
self.async_on_remove(
self._coordinator.async_add_listener(self._async_consume_update)
)
async def async_will_remove_from_hass(self):
"""Undo subscription."""
if self._scheduled_transition_update:
self._scheduled_transition_update()
|
nkgilley/home-assistant
|
homeassistant/components/myq/cover.py
|
Python
|
apache-2.0
| 6,868
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .datacatalog_metadata_cleaner import DataCatalogMetadataCleaner
__all__ = ('DataCatalogMetadataCleaner',)
|
GoogleCloudPlatform/datacatalog-connectors
|
google-datacatalog-connectors-commons/src/google/datacatalog_connectors/commons/cleanup/__init__.py
|
Python
|
apache-2.0
| 708
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import cubit
cubit.init([""])
try:
from geocubitlib import boundary_definition
from geocubitlib import cubit2specfem3d
except:
import boundary_definition
import cubit2specfem3d
# two volumes splitting a 134000 x 134000 x 60000 block horizontally
cubit.cmd('reset')
cubit.cmd('brick x 67000 y 134000 z 60000')
cubit.cmd('volume 1 move x 33500 y 67000 z -30000')
cubit.cmd('brick x 67000 y 134000 z 60000')
cubit.cmd('volume 2 move x 100500 y 67000 z -30000')
cubit.cmd('merge all')
# Meshing the volumes
elementsize = 3750.0
cubit.cmd('volume 1 size '+str(elementsize))
cubit.cmd('volume 2 size '+str(elementsize))
cubit.cmd('mesh volume 1 2')
#### End of meshing
###### This is boundary_definition.py of GEOCUBIT
#..... which extracts the bounding faces and defines them into blocks
boundary_definition.entities=['face']
boundary_definition.define_bc(boundary_definition.entities,parallel=True)
#### Define material properties for the 2 volumes ################
cubit.cmd('#### DEFINE MATERIAL PROPERTIES #######################')
cubit.cmd('block 1 name "elastic" ') # elastic material region
cubit.cmd('block 1 attribute count 6')
cubit.cmd('block 1 attribute index 1 1') # material flag: 1 for the first material
cubit.cmd('block 1 attribute index 2 2800') # vp
cubit.cmd('block 1 attribute index 3 1500') # vs
cubit.cmd('block 1 attribute index 4 2300') # rho
cubit.cmd('block 1 attribute index 5 9000.0') # Qmu
cubit.cmd('block 1 attribute index 6 1 ') # anisotropy_flag
cubit.cmd('block 2 name "elastic" ') # elastic material region
cubit.cmd('block 2 attribute count 6')
cubit.cmd('block 2 attribute index 1 1') # material flag: 1 for the first material
cubit.cmd('block 2 attribute index 2 2800') # vp
cubit.cmd('block 2 attribute index 3 1500') # vs
cubit.cmd('block 2 attribute index 4 2300') # rho
cubit.cmd('block 2 attribute index 5 9000.0') # Qmu
cubit.cmd('block 2 attribute index 6 0 ') # anisotropy_flag
cubit.cmd('export mesh "top.e" dimension 3 overwrite')
cubit.cmd('save as "meshing.cub" overwrite')
#### Export to SPECFEM3D format using cubit2specfem3d.py of GEOCUBIT
os.system('mkdir -p MESH')
cubit2specfem3d.export2SPECFEM3D('MESH')
# all files needed by SCOTCH are now in directory MESH
|
geodynamics/specfem3d
|
EXAMPLES/homogeneous_halfspace_HEX8_elastic_no_absorbing/block_mesh-anisotropic.py
|
Python
|
gpl-3.0
| 2,355
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
#
# FIXME: Supports e.g.
# MULTIPOINT (10 40, 40 30, 20 20, 30 10)
#
# but not:
# MULTIPOINT ((10 40), (40 30), (20 20), (30 10))
#
from spyne.model import SimpleModel
from spyne.model.primitive.string import Unicode
FLOAT_PATTERN = r'-?[0-9]+\.?[0-9]*(e-?[0-9]+)?'
_rinse_and_repeat = r'\s*\(%s\s*(,\s*%s)*\)\s*'
def _get_one_point_pattern(dim):
return ' +'.join([FLOAT_PATTERN] * dim)
def _get_point_pattern(dim):
return r'POINT\s*\(%s\)' % _get_one_point_pattern(dim)
def _get_one_multipoint_pattern(dim):
one_point = _get_one_point_pattern(dim)
return _rinse_and_repeat % (one_point, one_point)
def _get_multipoint_pattern(dim):
return r'MULTIPOINT\s*%s' % _get_one_multipoint_pattern(dim)
def _get_one_line_pattern(dim):
one_point = _get_one_point_pattern(dim)
return _rinse_and_repeat % (one_point, one_point)
def _get_linestring_pattern(dim):
return r'LINESTRING\s*%s' % _get_one_line_pattern(dim)
def _get_one_multilinestring_pattern(dim):
one_line = _get_one_line_pattern(dim)
return _rinse_and_repeat % (one_line, one_line)
def _get_multilinestring_pattern(dim):
return r'MULTILINESTRING\s*%s' % _get_one_multilinestring_pattern(dim)
def _get_one_polygon_pattern(dim):
one_line = _get_one_line_pattern(dim)
return _rinse_and_repeat % (one_line, one_line)
def _get_polygon_pattern(dim):
return r'POLYGON\s*%s' % _get_one_polygon_pattern(dim)
def _get_one_multipolygon_pattern(dim):
one_line = _get_one_polygon_pattern(dim)
return _rinse_and_repeat % (one_line, one_line)
def _get_multipolygon_pattern(dim):
return r'MULTIPOLYGON\s*%s' % _get_one_multipolygon_pattern(dim)
class Point(Unicode):
"""A point type whose native format is a WKT string. You can use
:func:`shapely.wkt.loads` to get a proper point type.
It's a subclass of the :class:`Unicode` type, so regular Unicode constraints
apply. The only additional parameter is the number of dimensions.
    :param dim: Number of dimensions.
"""
__type_name__ = None
class Attributes(Unicode.Attributes):
dim = None
@staticmethod
def Value(x, y, prec=15):
return ('POINT(%%3.%(prec)sf %%3.%(prec)sf)' % {'prec': prec}) % (x,y)
def __new__(cls, dim=None, **kwargs):
assert dim in (None, 2, 3)
if dim is not None:
kwargs['dim'] = dim
kwargs['pattern'] = _get_point_pattern(dim)
kwargs['type_name'] = 'point%dd' % dim
retval = SimpleModel.__new__(cls, **kwargs)
retval.__namespace__ = 'http://spyne.io/schema'
retval.__extends__ = Unicode
retval.__orig__ = Unicode
return retval
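# Illustrative sketch (not part of the original spyne module): how the Point
# type above might be exercised. Point(2) customizes the type with a 2D WKT
# pattern, and Point.Value renders a WKT string that the generated pattern
# accepts. The helper name is made up and nothing here runs at import time.
def _example_point_usage():
    import re
    Point2D = Point(2)                     # customized type named 'point2d'
    wkt = Point.Value(10.0, 20.0)          # 'POINT(10.000000000000000 20.000000000000000)'
    assert re.match(_get_point_pattern(2), wkt)
    return Point2D, wkt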
class Line(Unicode):
"""A line type whose native format is a WKT string. You can use
:func:`shapely.wkt.loads` to get a proper line type.
It's a subclass of the :class:`Unicode` type, so regular Unicode constraints
apply. The only additional parameter is the number of dimensions.
    :param dim: Number of dimensions.
"""
__type_name__ = None
class Attributes(Unicode.Attributes):
dim = None
def __new__(cls, dim=None, **kwargs):
assert dim in (None, 2, 3)
if dim is not None:
kwargs['dim'] = dim
kwargs['pattern'] = _get_linestring_pattern(dim)
kwargs['type_name'] = 'line%dd' % dim
retval = SimpleModel.__new__(cls, **kwargs)
retval.__namespace__ = 'http://spyne.io/schema'
retval.__extends__ = Unicode
retval.__orig__ = Unicode
return retval
LineString = Line
class Polygon(Unicode):
"""A polygon type whose native format is a WKT string. You can use
:func:`shapely.wkt.loads` to get a proper polygon type.
It's a subclass of the :class:`Unicode` type, so regular Unicode constraints
apply. The only additional parameter is the number of dimensions.
    :param dim: Number of dimensions.
"""
__type_name__ = None
class Attributes(Unicode.Attributes):
dim = None
def __new__(cls, dim=None, **kwargs):
assert dim in (None, 2, 3)
if dim is not None:
kwargs['dim'] = dim
kwargs['pattern'] = _get_polygon_pattern(dim)
kwargs['type_name'] = 'polygon%dd' % dim
retval = SimpleModel.__new__(cls, **kwargs)
retval.__namespace__ = 'http://spyne.io/schema'
retval.__extends__ = Unicode
retval.__orig__ = Unicode
return retval
class MultiPoint(Unicode):
"""A MultiPoint type whose native format is a WKT string. You can use
:func:`shapely.wkt.loads` to get a proper MultiPoint type.
It's a subclass of the :class:`Unicode` type, so regular Unicode constraints
apply. The only additional parameter is the number of dimensions.
    :param dim: Number of dimensions.
"""
__type_name__ = None
class Attributes(Unicode.Attributes):
dim = None
def __new__(cls, dim=None, **kwargs):
assert dim in (None, 2, 3)
if dim is not None:
kwargs['dim'] = dim
kwargs['pattern'] = _get_multipoint_pattern(dim)
kwargs['type_name'] = 'multiPoint%dd' % dim
retval = SimpleModel.__new__(cls, **kwargs)
retval.__namespace__ = 'http://spyne.io/schema'
retval.__extends__ = Unicode
retval.__orig__ = Unicode
return retval
class MultiLine(Unicode):
"""A MultiLine type whose native format is a WKT string. You can use
:func:`shapely.wkt.loads` to get a proper MultiLine type.
It's a subclass of the :class:`Unicode` type, so regular Unicode constraints
apply. The only additional parameter is the number of dimensions.
    :param dim: Number of dimensions.
"""
__type_name__ = None
class Attributes(Unicode.Attributes):
dim = None
def __new__(cls, dim=None, **kwargs):
assert dim in (None, 2, 3)
if dim is not None:
kwargs['dim'] = dim
kwargs['pattern'] = _get_multilinestring_pattern(dim)
kwargs['type_name'] = 'multiLine%dd' % dim
retval = SimpleModel.__new__(cls, **kwargs)
retval.__namespace__ = 'http://spyne.io/schema'
retval.__extends__ = Unicode
retval.__orig__ = Unicode
return retval
MultiLineString = MultiLine
class MultiPolygon(Unicode):
"""A MultiPolygon type whose native format is a WKT string. You can use
:func:`shapely.wkt.loads` to get a proper MultiPolygon type.
It's a subclass of the :class:`Unicode` type, so regular Unicode constraints
apply. The only additional parameter is the number of dimensions.
    :param dim: Number of dimensions.
"""
__type_name__ = None
class Attributes(Unicode.Attributes):
dim = None
def __new__(cls, dim=None, **kwargs):
assert dim in (None, 2, 3)
if dim is not None:
kwargs['dim'] = dim
kwargs['pattern'] = _get_multipolygon_pattern(dim)
kwargs['type_name'] = 'multipolygon%dd' % dim
retval = SimpleModel.__new__(cls, **kwargs)
retval.__namespace__ = 'http://spyne.io/schema'
retval.__extends__ = Unicode
retval.__orig__ = Unicode
return retval
|
deevarvar/myLab
|
baidu_code/soap_mockserver/spyne/model/primitive/geo.py
|
Python
|
mit
| 8,026
|
import unittest
from mox import MoxTestBase, IsA
from slimta.relay.smtp.static import StaticSmtpRelay
from slimta.relay.smtp.client import SmtpRelayClient
class TestStaticSmtpRelay(MoxTestBase, unittest.TestCase):
def test_add_client(self):
static = StaticSmtpRelay('testhost')
ret = static.add_client()
self.assertIsInstance(ret, SmtpRelayClient)
def test_add_client_custom(self):
def fake_class(addr, queue, **kwargs):
self.assertEqual(('testhost', 25), addr)
return 'success'
static = StaticSmtpRelay('testhost', client_class=fake_class)
self.assertEqual('success', static.add_client())
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
slimta/python-slimta
|
test/test_slimta_relay_smtp_static.py
|
Python
|
mit
| 713
|
import sys
import unittest
from argparse import ArgumentParser
from collections import namedtuple
from pprint import pprint
from unittest.mock import patch
from unittest.mock import MagicMock
from python_bindings import constants
from tests.harness import instrumentGooey
from gooey.tests import *
class TestGooeyApplication(unittest.TestCase):
def testFullscreen(self):
parser = self.basicParser()
for shouldShow in [True, False]:
with self.subTest('Should set full screen: {}'.format(shouldShow)):
with instrumentGooey(parser, fullscreen=shouldShow) as (app, frame, gapp):
self.assertEqual(frame.IsFullScreen(), shouldShow)
@patch("gui.containers.application.modals.confirmForceStop")
def testGooeyRequestsConfirmationWhenShowStopWarningModalTrue(self, mockModal):
"""
When show_stop_warning=False, Gooey should immediately kill the
running program without additional user confirmation.
        Otherwise, Gooey should show a confirmation modal and, depending on the
user's choice, either do nothing or kill the running program.
"""
Case = namedtuple('Case', ['show_warning', 'shouldSeeConfirm', 'userChooses', 'shouldHaltProgram'])
testcases = [
Case(show_warning=True, shouldSeeConfirm=True, userChooses=True, shouldHaltProgram=True),
Case(show_warning=True, shouldSeeConfirm=True, userChooses=False, shouldHaltProgram=False),
Case(show_warning=False, shouldSeeConfirm=False, userChooses='N/A', shouldHaltProgram=True),
]
for case in testcases:
mockModal.reset_mock()
parser = self.basicParser()
with instrumentGooey(parser, show_stop_warning=case.show_warning) as (app, frame, gapp):
mockClientRunner = MagicMock()
mockModal.return_value = case.userChooses
gapp.clientRunner = mockClientRunner
gapp.handleInterrupt()
if case.shouldSeeConfirm:
mockModal.assert_called()
else:
mockModal.assert_not_called()
if case.shouldHaltProgram:
mockClientRunner.stop.assert_called()
else:
mockClientRunner.stop.assert_not_called()
# @patch("gui.containers.application.modals.confirmForceStop")
# def testOnCloseShutsDownActiveClients(self, mockModal):
# """
# Issue 592: Closing the UI should clean up any actively running programs
# """
# parser = self.basicParser()
# with instrumentGooey(parser) as (app, frame):
# frame.clientRunner = MagicMock()
# frame.destroyGooey = MagicMock()
# # mocking that the user clicks "yes shut down" in the warning modal
# mockModal.return_value = True
# frame._instance.handleClose()
#
# mockModal.assert_called()
# frame.destroyGooey.assert_called()
def testTerminalColorChanges(self):
## Issue #625 terminal panel color wasn't being set due to a typo
parser = self.basicParser()
expectedColors = [(255, 0, 0, 255), (255, 255, 255, 255), (100, 100, 100,100)]
for expectedColor in expectedColors:
with instrumentGooey(parser, terminal_panel_color=expectedColor) as (app, frame, gapp):
foundColor = gapp.consoleRef.instance.GetBackgroundColour()
self.assertEqual(tuple(foundColor), expectedColor)
def testFontWeightsGetSet(self):
## Issue #625 font weight wasn't being correctly passed to the terminal
for weight in [constants.FONTWEIGHT_LIGHT, constants.FONTWEIGHT_BOLD]:
parser = self.basicParser()
with instrumentGooey(parser, terminal_font_weight=weight) as (app, frame, gapp):
terminal = gapp.consoleRef.instance.textbox
self.assertEqual(terminal.GetFont().GetWeight(), weight)
def testProgressBarHiddenWhenDisabled(self):
options = [
{'disable_progress_bar_animation': True},
{'disable_progress_bar_animation': False},
{}
]
for kwargs in options:
parser = self.basicParser()
with instrumentGooey(parser, **kwargs) as (app, frame, gapp):
mockClientRunner = MagicMock()
frame.clientRunner = mockClientRunner
                # transitions Gooey to the running state using the now-mocked processor
# so that we can make assertions about the visibility of footer buttons
gapp.onStart()
                # the progress bar flag is awkwardly inverted (is_disabled, rather
                # than is_enabled), so the expectation is inverted here: when
                # disabled is True, shown should be False.
expect_shown = not kwargs.get('disable_progress_bar_animation', False)
self.assertEqual(gapp.state['progress']['show'], expect_shown)
def basicParser(self):
parser = ArgumentParser()
parser.add_argument('--foo')
return parser
if __name__ == '__main__':
unittest.main()
|
chriskiehl/Gooey
|
gooey/tests/test_application.py
|
Python
|
mit
| 5,268
|
# -*- coding: utf-8 -*-
import os
import glob
# Import Data Resampling Class from user library
from lib.pyoval import CS2RefOrbit
from lib.pyoval import CS2OrbitResData
from lib.data import read_embird_dat
from lib.data import read_OIB
from lib.helpers import EmptyObject
class PathInfo():
""" Definition of pathes and filenames for tests """
def __init__(self):
# Path to reference orbit files
self.dir = EmptyObject()
self.dir.workspace = '/Volumes/Data/OneDrive/CryoVal-SI/GoldenDays/'
self.dir.SampleData = os.path.join(self.dir.workspace,'20130424')
self.dir.RefOrbit = self.dir.SampleData
self.dir.RefCCOrbit = self.dir.SampleData
self.dir.SampleFigures = self.dir.SampleData
# Files
self.file = EmptyObject()
#self.file.example_orbit = r'Location_file_21011_20140326T090003_20140326T102353.txt'
#self.file.example_des_orbit = r'Location_file_10520_20120402T110853_20120402T124403_L1B_vBC.txt'
#self.file.example_aem = r'20140321_allfinal.dat'
self.file.example_oib = r'OIB_20130424_IDCSI2.txt'
def example_calc_corner_coordinates(orbit=10520,lon_limit=[-180.1, 180.1], lat_limit=[60., 88.]):
"""Calculate Corner Coordinates from reference orbit files"""
    # Read exemplary reference orbit data (center locations of footprint)
# Initialize RefOrbit Class
# Required for setting projection, initial ROI etc.
info = PathInfo()
reforbit = CS2RefOrbit()
# Limit reference orbit data to region of interest
    # (Speed things up for corner coordinate calculation and resampling)
# ! Needs to be called before from_file
# This example: Lincoln Sea (CryoVex 2011)
# Note: longitude limits in range between -180 and 180
reforbit.set_sin_detection(False)
reforbit.limit_region(lat_limit=lat_limit, lon_limit=lon_limit)
reforbit_file= str(os.path.join(info.dir.RefOrbit, info.file.example_orbit))
print reforbit_file
# Read center coordinates (files provided from UCL for CryoVal-SI)
# Input parameter: Full file name
reforbit.from_file(reforbit_file)
reforbit.debug_map()
# Save corner coordinates to file (in specified folder)
reforbit.to_CCfile(folder=info.dir.RefCCOrbit)
def example_resample_oib(orbit=10520):
"""
Example of how to resample arbitrary validation data onto
CryoSat-2 footprints (no drift correction applied)
"""
#===================================================================================
# 1) Get Corner coordinates
#===================================================================================
    # Read reference corner coordinates from ASCII file
# Retrieve corner coordinates from orbit 5399
# (Example, can be customized)
info = PathInfo()
cc_file = glob.glob(os.path.join(info.dir.RefCCOrbit, '*'+str(orbit)+'*'))[0]
# Initialize reference orbit data object and read corner coordinate file
reforbit = CS2RefOrbit()
reforbit.from_file(cc_file, corner_coords=True)
# Example: Read EM-Bird data
# Data structure of Cal/Val data can be arbitrary as long as
# * data is organized as vectors/array with identical shape
# * arrays exist for time, longitude, latitude, value (e.g. thickness, freeboard, ....)
# * time information is available as python datetime object (time zone UTC())
oib_file = os.path.join(info.dir.SampleData, info.file.example_oib)
print oib_file
oib = read_OIB(oib_file)
#===================================================================================
# 2) Resample Cal/Val Data to CryoSat-2 orbits
#===================================================================================
# Initialize object
# Required for projection and sanity checks
resdata = CS2OrbitResData()
# Add the Orbit Corner Coordinates
resdata.add_cs2orbit(reforbit)
# Add the Cal/Val data
resdata.add_calval_data(oib.dt_time, # time info as datetime object vector
oib.longitude, # longitude vector
oib.latitude, # latitude vector
oib.freeboard) # cal/val data vector
# make use of pre-defined bins from progress meeting 1
# user bins can be added by the method pyoval.CS2OrbitResData.set_pdf_bins(bins)
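    # e.g. (hypothetical bin edges, for illustration only):
    #   resdata.set_pdf_bins([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])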
resdata.set_data_type('freeboard')
# Resample Cal/Val Data
resdata.resample()
#===================================================================================
# 3) Generate output (data files and summary plots)
#===================================================================================
# Define Labels etc for output
resdata.set_data_label('oibql')
resdata.set_data_extlabel("OIB freeboard")
resdata.set_parameter_extlabel('Freeboard')
resdata.set_data_unit('Meter')
# Write ASCII data output
resdata.to_file(info.dir.SampleFigures)
# Create summary plot (in specified folder)
resdata.summary_plot(info.dir.SampleFigures)
def example_resample_embird(orbit=10520):
"""
Example of how to resample arbitrary validation data onto
CryoSat-2 footprints (no drift correction applied)
"""
#===================================================================================
# 1) Get Corner coordinates
#===================================================================================
    # Read reference corner coordinates from ASCII file
# Retrieve corner coordinates from orbit 5399
# (Example, can be customized)
info = PathInfo()
cc_file = glob.glob(os.path.join(info.dir.RefCCOrbit, '*'+str(orbit)+'*'))[0]
# Initialize reference orbit data object and read corner coordinate file
reforbit = CS2RefOrbit()
reforbit.from_file(cc_file, corner_coords=True)
# Example: Read EM-Bird data
# Data structure of Cal/Val data can be arbitrary as long as
# * data is organized as vectors/array with identical shape
# * arrays exist for time, longitude, latitude, value (e.g. thickness, freeboard, ....)
# * time information is available as python datetime object (time zone UTC())
aem_file = os.path.join(info.dir.SampleData, info.file.example_aem)
aem = read_embird_dat(aem_file)
#===================================================================================
# 2) Resample Cal/Val Data to CryoSat-2 orbits
#===================================================================================
# Initialize object
# Required for projection and sanity checks
resdata = CS2OrbitResData()
# Add the Orbit Corner Coordinates
resdata.add_cs2orbit(reforbit)
# Add the Cal/Val data
resdata.add_calval_data(aem.dt_time, # time info as datetime object vector
aem.longitude, # longitude vector
aem.latitude, # latitude vector
aem.thickness) # cal/val data vector
# make use of pre-defined bins from progress meeting 1
# user bins can be added by the method pyoval.CS2OrbitResData.set_pdf_bins(bins)
resdata.set_data_type('thickness')
# Resample Cal/Val Data
resdata.resample()
#===================================================================================
# 3) Generate output (data files and summary plots)
#===================================================================================
# Define Labels etc for output
resdata.set_data_label('aem')
resdata.set_data_extlabel("EM-Bird sea-ice thickness")
resdata.set_parameter_extlabel('Sea-Ice Thickness')
resdata.set_data_unit('Meter')
# Write ASCII data output
resdata.to_file(info.dir.SampleFigures)
# Create summary plot (in specified folder)
resdata.summary_plot(info.dir.SampleFigures)
def test_ascending_orbit():
    """ Test of SARin position corrections for ascending orbits """
info = PathInfo()
reforbit = CS2RefOrbit()
reforbit.set_sin_detection(False)
reforbit.limit_region(lat_limit=[82., 88.], lon_limit=[-120., -40.])
reforbit.from_file(os.path.join(info.dir.RefOrbit, info.file.example_orbit))
reforbit.to_CCfile(folder=info.dir.RefCCOrbit)
def test_descending_orbit():
""" Test of SARin position corrections for descending orbits """
info = PathInfo()
reforbit = CS2RefOrbit()
reforbit.set_sin_detection(False)
reforbit.limit_region(lat_limit=[82., 88.], lon_limit=[-120., -40.])
reforbit.from_file(os.path.join(info.dir.RefOrbit, info.file.example_des_orbit))
reforbit.to_CCfile(folder=info.dir.RefCCOrbit)
if __name__ == '__main__':
#example_calc_corner_coordinates(orbit=21011,
# lat_limit=[60., 88.],
# lon_limit=[-180.1, 180.1])
#example_resample_embird(orbit=16139)
#example_resample_embird(orbit=21077)
example_resample_oib(orbit=16139)
#example_resample_oib(orbit=21092)
#test_descending_orbit()
    #test_ascending_orbit()
|
shendric/pyoval
|
tests_resample_uofa_orbits.py
|
Python
|
gpl-3.0
| 9,128
|
""" steward_salt client commands """
from pprint import pprint
def do_salt(client, tgt, cmd, *args, **kwargs):
"""
Run a command using salt
Parameters
----------
tgt : str
The servers to run the command on
cmd : str
The salt module to run
timeout : int, optional
How long to wait for a response from the minions
expr_form : str, optional
The format of the target str (default 'glob')
args : list
List of positional arguments to pass to the command
kwargs : dict
Dict of keyword arguments to pass to the command
"""
timeout = kwargs.pop('timeout', None)
expr_form = kwargs.pop('expr_form', None)
data = {
'tgt': tgt,
'cmd': cmd,
'arg': args,
'kwarg': kwargs,
}
if timeout is not None:
data['timeout'] = timeout
if expr_form is not None:
data['expr_form'] = expr_form
response = client.cmd('salt', **data)
pprint(response.json())
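# Illustrative sketch (not part of steward_salt): a stub client showing how
# do_salt() is meant to be called. The real `client` object comes from the
# steward framework; the stub below only mimics the `cmd` call used above and
# is never invoked at import time. Target and module names are made up.
def _example_do_salt_usage():
    class _StubResponse(object):
        def json(self):
            return {'minion1': True}
    class _StubClient(object):
        def cmd(self, endpoint, **data):
            # endpoint would be 'salt'; data carries tgt/cmd/arg/kwarg
            return _StubResponse()
    do_salt(_StubClient(), 'web*', 'test.ping', timeout=30, expr_form='glob')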
def do_salt_ssh(client, tgt, cmd, *args, **kwargs):
"""
    Run a command using salt-ssh
Parameters
----------
tgt : str
The servers to run the command on
cmd : str
The salt module to run
timeout : int, optional
How long to wait for a response from the minions
expr_form : str, optional
The format of the target str (default 'glob')
args : list
List of positional arguments to pass to the command
kwargs : dict
Dict of keyword arguments to pass to the command
"""
timeout = kwargs.pop('timeout', None)
expr_form = kwargs.pop('expr_form', None)
data = {
'tgt': tgt,
'cmd': cmd,
'arg': args,
'kwarg': kwargs,
}
if timeout is not None:
data['timeout'] = timeout
if expr_form is not None:
data['expr_form'] = expr_form
response = client.cmd('salt/ssh', **data)
pprint(response.json())
def do_salt_call(client, mod, *args, **kwargs):
"""
Run a salt command on the server
Parameters
----------
mod : str
The salt module to run
*args : list
List of positional arguments to pass to the command
**kwargs : dict
Dict of keyword arguments to pass to the command
"""
response = client.cmd('salt/call', cmd=mod, arg=args, kwarg=kwargs)
pprint(response.json())
def do_omnishell(client):
""" Activate the OMNISHELL """
from .omnishell import load_dotfile, SaltTerm
def delegate(transport, tgt, cmd, arg, timeout, expr_form):
""" Delegates commands to the salt endpoints """
if transport == 'zmq':
return client.cmd('salt', tgt=tgt, cmd=cmd, arg=arg,
timeout=timeout, expr_form=expr_form).json()
elif transport == 'ssh':
return client.cmd('salt/ssh', tgt=tgt, cmd=cmd, arg=arg,
timeout=timeout, expr_form=expr_form).json()
else:
return "Unknown transport '%s'" % transport
SaltTerm().start(delegate, load_dotfile())
|
mathcamp/steward_salt
|
steward_salt/client.py
|
Python
|
mit
| 3,087
|
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields as obj_fields
from nova.virt import hardware
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMACell(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add pagesize field
# Version 1.2: Add cpu_pinning_raw and topology fields
VERSION = '1.2'
fields = {
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True),
'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
nullable=True),
'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True)
}
obj_relationships = {
'cpu_topology': [('1.2', '1.0')]
}
cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
def __init__(self, **kwargs):
super(InstanceNUMACell, self).__init__(**kwargs)
if 'pagesize' not in kwargs:
self.pagesize = None
self.obj_reset_changes(['pagesize'])
if 'cpu_topology' not in kwargs:
self.cpu_topology = None
self.obj_reset_changes(['cpu_topology'])
if 'cpu_pinning' not in kwargs:
self.cpu_pinning = None
self.obj_reset_changes(['cpu_pinning_raw'])
def __len__(self):
return len(self.cpuset)
def _to_dict(self):
        # NOTE(sahid): Used as legacy; could be renamed to
        # _legacy_to_dict_ in the future to avoid confusion.
return {'cpus': hardware.format_cpu_spec(self.cpuset,
allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id,
'pagesize': self.pagesize}
@classmethod
def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy; could be renamed to
        # _legacy_from_dict_ in the future to avoid confusion.
cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
pagesize = data_dict.get('pagesize')
return cls(id=cell_id, cpuset=cpuset,
memory=memory, pagesize=pagesize)
@property
def siblings(self):
cpu_list = sorted(list(self.cpuset))
threads = 0
if self.cpu_topology:
threads = self.cpu_topology.threads
if threads == 1:
threads = 0
return list(map(set, zip(*[iter(cpu_list)] * threads)))
@property
def cpu_pinning_requested(self):
return self.cpu_pinning is not None
def pin(self, vcpu, pcpu):
if vcpu not in self.cpuset:
return
pinning_dict = self.cpu_pinning or {}
pinning_dict[vcpu] = pcpu
self.cpu_pinning = pinning_dict
def pin_vcpus(self, *cpu_pairs):
for vcpu, pcpu in cpu_pairs:
self.pin(vcpu, pcpu)
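# Illustrative sketch (not part of nova): how the pinning helpers above are
# typically exercised. The field values are made up, and the helper is never
# called at import time.
def _example_pin_vcpus():
    cell = InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=1024)
    cell.pin_vcpus((0, 4), (1, 5))   # vcpu 0 -> pcpu 4, vcpu 1 -> pcpu 5
    return cell.cpu_pinning          # {0: 4, 1: 5}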
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMATopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Takes into account pagesize
# Version 1.2: InstanceNUMACell 1.2
VERSION = '1.2'
fields = {
# NOTE(danms): The 'id' field is no longer used and should be
# removed in the future when convenient
'id': obj_fields.IntegerField(),
'instance_uuid': obj_fields.UUIDField(),
'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'),
}
obj_relationships = {
'cells': [('1.0', '1.0'), ('1.2', '1.2')],
}
@classmethod
def obj_from_primitive(cls, primitive, context=None):
if 'nova_object.name' in primitive:
obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(
primitive, context=None)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that there are no cases of the old format stored in
# the database (or forever, if we can never guarantee that).
obj_topology = InstanceNUMATopology._from_dict(primitive)
obj_topology.id = 0
return obj_topology
@classmethod
def obj_from_db_obj(cls, instance_uuid, db_obj):
primitive = jsonutils.loads(db_obj)
obj_topology = cls.obj_from_primitive(primitive)
if 'nova_object.name' not in db_obj:
obj_topology.instance_uuid = instance_uuid
# No benefit to store a list of changed fields
obj_topology.obj_reset_changes()
return obj_topology
# TODO(ndipanov) Remove this method on the major version bump to 2.0
@base.remotable
def create(self):
self._save()
# NOTE(ndipanov): We can't rename create and want to avoid version bump
# as this needs to be backported to stable so this is not a @remotable
# That's OK since we only call it from inside Instance.save() which is.
def _save(self):
values = {'numa_topology': self._to_json()}
db.instance_extra_update_by_uuid(self._context, self.instance_uuid,
values)
self.obj_reset_changes()
# NOTE(ndipanov): We want to avoid version bump
# as this needs to be backported to stable so this is not a @remotable
# That's OK since we only call it from inside Instance.save() which is.
@classmethod
def delete_by_instance_uuid(cls, context, instance_uuid):
values = {'numa_topology': None}
db.instance_extra_update_by_uuid(context, instance_uuid,
values)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['numa_topology'])
if not db_extra:
raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)
if db_extra['numa_topology'] is None:
return None
return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology'])
def _to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def _to_dict(self):
        # NOTE(sahid): Used as legacy; could be renamed to _legacy_to_dict_
        # in the future to avoid confusion.
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy; could be renamed to _legacy_from_dict_
        # in the future to avoid confusion.
return cls(cells=[
InstanceNUMACell._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
@property
def cpu_pinning_requested(self):
return all(cell.cpu_pinning_requested for cell in self.cells)
|
scripnichenko/nova
|
nova/objects/instance_numa_topology.py
|
Python
|
apache-2.0
| 7,900
|
# Copyright 2012 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import DoesNotExist, MultipleObjectsReturned
from utils import parse_query
from utils import QueryStringQuery
from store import ElasticsearchClient
from queryset import ElasticsearchQueryset
class ElasticsearchUtils(object):
"""
useful but scary methods not appropriate to be on the model/managers
"""
_client = None
_index = None
_type = None
def __init__(self, model_cls):
self._client = model_cls._client
self._index = model_cls._get_index()
self._type = model_cls._get_doctype()
def delete_index(self):
return self._client.delete_index(self._index)
def create_index(self):
return self._client.create_index(self._index)
def optimize(self):
return self._client.optimize(self._index)
def refresh(self):
return self._client.refresh(self._index)
def delete_all_documents(self):
return self.delete_by_query("*:*")
def delete_by_query(self, query):
return self._client.delete_by_query(self._index, self._type, QueryStringQuery(query).to_json())
class ElasticsearchManager(object):
"""
base manager class for elasticsearch documents
"""
_model = None
def _get_queryset(self, query):
return ElasticsearchQueryset(self._model, query)
def filter(self, query_string=None, **kwargs):
return self._get_queryset(parse_query(query_string, **kwargs))
# TODO: Allow getting documents directly by id, saving the query
def get(self, query_string=None, **kwargs):
qs = self._get_queryset(parse_query(query_string, **kwargs))
if qs.count() > 1:
raise MultipleObjectsReturned
if qs.count() < 1:
raise DoesNotExist
return qs[0]
def create(self, **kwargs):
return self._model(**kwargs).save()
def all(self):
return self._get_queryset(parse_query('*:*'))
class ElasticsearchBaseModel(type):
# triumph of metaprogramming </sarcasm>
    # this feels lame, but it is the only way I could find to let the base
    # manager class know which model it belongs to without requiring an
    # instance of said model.
def __getattribute__(mcs, name):
# intercept the manager and populate it with the correct class
if name == 'objects':
objects = type.__getattribute__(mcs, name)
objects._model = mcs
return objects
else:
            # otherwise, business as usual
return type.__getattribute__(mcs, name)
class ElasticsearchModel(object):
"""
object representing an elasticsearch document.
Works pretty much exactly like the django model/manager pattern
except you don't specify attributes for the model because everything
is all schemaless and shit.
TODO: object mappings
TODO: document validation
TODO: document primary keys? (hardcoded to 'id')
"""
__metaclass__ = ElasticsearchBaseModel
_document = None
_client = ElasticsearchClient()
objects = ElasticsearchManager()
def __init__(self, **kwargs):
self._document = {}
for k, v in kwargs.iteritems():
self._document[k] = v
def __setattr__(self, name, value):
if name.startswith('_'):
super(ElasticsearchModel, self).__setattr__(name, value)
else:
self._document[name] = value
def __getattr__(self, name):
try:
return self._document.get(name)
except KeyError:
raise AttributeError
def __str__(self):
return 'instance at %s' % hex(id(self))
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.__str__())
def __dict__(self):
return self._document
@classmethod
def _get_index(cls):
return '%ss' % cls.__name__.lower()
@classmethod
def _get_doctype(cls):
return cls.__name__.lower()
def save(self):
self._client.index(self._document, self._get_index(), self._get_doctype())
return self
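# Illustrative sketch (not part of the original module): the intended usage,
# mirroring Django's model/manager pattern. LogEntry and its fields are made
# up, and the helper is never called at import time.
def _example_usage():
    class LogEntry(ElasticsearchModel):
        pass
    entry = LogEntry(message='hello', level='info')
    entry.save()                                    # indexes into 'logentrys'
    return LogEntry.objects.filter(level='info')    # lazy ElasticsearchQueryset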
|
racker/slogger
|
elasticsearch/core/models.py
|
Python
|
apache-2.0
| 4,651
|
# -*- coding: utf-8 -*-
###############################################################################
#
# MoneyReceived
# Retrieves a list of successful charge events.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class MoneyReceived(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the MoneyReceived Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(MoneyReceived, self).__init__(temboo_session, '/Library/Stripe/Events/MoneyReceived')
def new_input_set(self):
return MoneyReceivedInputSet()
def _make_result_set(self, result, path):
return MoneyReceivedResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return MoneyReceivedChoreographyExecution(session, exec_id, path)
class MoneyReceivedInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the MoneyReceived
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Stripe)
"""
super(MoneyReceivedInputSet, self)._set_input('APIKey', value)
def set_Count(self, value):
"""
Set the value of the Count input for this Choreo. ((optional, integer) A limit on the number of events to be returned. Count can range between 1 and 100 items. Defaults to 10.)
"""
super(MoneyReceivedInputSet, self)._set_input('Count', value)
def set_Created(self, value):
"""
Set the value of the Created input for this Choreo. ((optional, date) Filters the result based on the event created date (a UTC timestamp).)
"""
super(MoneyReceivedInputSet, self)._set_input('Created', value)
def set_GreaterThanEqualTo(self, value):
"""
Set the value of the GreaterThanEqualTo input for this Choreo. ((optional, date) Returns events that have been created after or equal to this UTC timestamp.)
"""
super(MoneyReceivedInputSet, self)._set_input('GreaterThanEqualTo', value)
def set_GreaterThan(self, value):
"""
Set the value of the GreaterThan input for this Choreo. ((optional, date) Returns events that have been created after this UTC timestamp.)
"""
super(MoneyReceivedInputSet, self)._set_input('GreaterThan', value)
def set_LessThanEqualTo(self, value):
"""
Set the value of the LessThanEqualTo input for this Choreo. ((optional, date) Return events that were created before or equal to this UTC timestamp.)
"""
super(MoneyReceivedInputSet, self)._set_input('LessThanEqualTo', value)
def set_LessThan(self, value):
"""
Set the value of the LessThan input for this Choreo. ((optional, date) Return events that were created before this UTC timestamp.)
"""
super(MoneyReceivedInputSet, self)._set_input('LessThan', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) An offset into your events array. The API will return the requested number of events starting at that offset.)
"""
super(MoneyReceivedInputSet, self)._set_input('Offset', value)
def set_ResponseMode(self, value):
"""
Set the value of the ResponseMode input for this Choreo. ((optional, string) Used to simplify the response. Valid values are: simple and verbose. When set to simple, an array of charge amounts is returned. Verbose mode returns an array of charge objects. Defaults to "simple".)
"""
super(MoneyReceivedInputSet, self)._set_input('ResponseMode', value)
class MoneyReceivedResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the MoneyReceived Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe)
"""
return self._output.get('Response', None)
def get_TotalCount(self):
"""
Retrieve the value for the "TotalCount" output from this Choreo execution. ((integer) The total number of results. This can be used to determine whether or not you need to retrieve the next page of results.)
"""
return self._output.get('TotalCount', None)
class MoneyReceivedChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return MoneyReceivedResultSet(response, path)
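# Illustrative usage (comments only; assumes the standard Temboo SDK flow in
# which a Choreo is executed through a TembooSession -- the execution call is
# provided by the SDK base class, not defined in this file):
#
#   choreo = MoneyReceived(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('YOUR_STRIPE_API_KEY')
#   inputs.set_Count('25')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_TotalCount())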
|
jordanemedlock/psychtruths
|
temboo/core/Library/Stripe/Events/MoneyReceived.py
|
Python
|
apache-2.0
| 5,733
|
###############################################################################
# Copyright (c) Intel Corporation - All rights reserved. #
# This file is part of the LIBXSMM library. #
# #
# For information on the license, see the LICENSE file. #
# Further information: https://github.com/hfp/libxsmm/ #
# SPDX-License-Identifier: BSD-3-Clause #
###############################################################################
# Hans Pabst (Intel Corp.)
###############################################################################
import sphinx_rtd_theme
import os
project = 'LIBXSMM'
copyright = '2009-2021, Intel Corporation.'
author = 'Intel Corporation'
user = os.environ.get('USER')
extensions = [
#"recommonmark",
"m2r2"
]
master_doc = "index"
source_suffix = [
".rst",
#".md"
]
exclude_patterns = [
"*-" + user + "-*.md",
"Thumbs.db",
".DS_Store",
"_build"
]
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"navigation_depth": 2
}
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ["../.theme"]
templates_path = ["_templates"]
pygments_style = "sphinx"
language = None
|
hfp/libxsmm
|
documentation/conf.py
|
Python
|
bsd-3-clause
| 1,370
|
#!/usr/bin/python
def message(to, text):
    print "this is ", to, ":\n", text
def add(a, b):
    return a + b
message('xichen', 'eyu')
print add(1, 2)
def mul(a, b):
    return a * b
print mul(2, 3)
print mul('a', 3)
print mul(b=2, a='dd')
print 2 ** 100
print message
func = add
print func(1, 2)
|
cxsjabc/basic
|
python/message.py
|
Python
|
agpl-3.0
| 308
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for :module:'heat.engine.clients.os.cinder'."""
import uuid
import mock
from heat.common import exception
from heat.engine.clients.os import cinder
from heat.tests import common
from heat.tests import utils
class CinderClientPluginTest(common.HeatTestCase):
"""Basic tests for :module:'heat.engine.clients.os.cinder'."""
def setUp(self):
super(CinderClientPluginTest, self).setUp()
self.cinder_client = mock.MagicMock()
con = utils.dummy_context()
c = con.clients
self.cinder_plugin = c.client_plugin('cinder')
self.cinder_plugin._client = self.cinder_client
def test_get_volume(self):
"""Tests the get_volume function."""
volume_id = str(uuid.uuid4())
my_volume = mock.MagicMock()
self.cinder_client.volumes.get.return_value = my_volume
self.assertEqual(my_volume, self.cinder_plugin.get_volume(volume_id))
self.cinder_client.volumes.get.assert_called_once_with(volume_id)
def test_get_snapshot(self):
"""Tests the get_volume_snapshot function."""
snapshot_id = str(uuid.uuid4())
my_snapshot = mock.MagicMock()
self.cinder_client.volume_snapshots.get.return_value = my_snapshot
self.assertEqual(my_snapshot,
self.cinder_plugin.get_volume_snapshot(snapshot_id))
self.cinder_client.volume_snapshots.get.assert_called_once_with(
snapshot_id)
class VolumeConstraintTest(common.HeatTestCase):
def setUp(self):
super(VolumeConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_get_volume = mock.Mock()
self.ctx.clients.client_plugin(
'cinder').get_volume = self.mock_get_volume
self.constraint = cinder.VolumeConstraint()
def test_validation(self):
self.mock_get_volume.return_value = None
self.assertTrue(self.constraint.validate("foo", self.ctx))
def test_validation_error(self):
self.mock_get_volume.side_effect = exception.EntityNotFound(
entity='Volume', name='bar')
self.assertFalse(self.constraint.validate("bar", self.ctx))
class VolumeSnapshotConstraintTest(common.HeatTestCase):
def setUp(self):
super(VolumeSnapshotConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_get_snapshot = mock.Mock()
self.ctx.clients.client_plugin(
'cinder').get_volume_snapshot = self.mock_get_snapshot
self.constraint = cinder.VolumeSnapshotConstraint()
def test_validation(self):
self.mock_get_snapshot.return_value = 'snapshot'
self.assertTrue(self.constraint.validate("foo", self.ctx))
def test_validation_error(self):
self.mock_get_snapshot.side_effect = exception.EntityNotFound(
entity='VolumeSnapshot', name='bar')
self.assertFalse(self.constraint.validate("bar", self.ctx))
class VolumeTypeConstraintTest(common.HeatTestCase):
def setUp(self):
super(VolumeTypeConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_get_volume_type = mock.Mock()
self.ctx.clients.client_plugin(
'cinder').get_volume_type = self.mock_get_volume_type
self.constraint = cinder.VolumeTypeConstraint()
def test_validation(self):
self.mock_get_volume_type.return_value = 'volume_type'
self.assertTrue(self.constraint.validate("foo", self.ctx))
def test_validation_error(self):
self.mock_get_volume_type.side_effect = exception.EntityNotFound(
entity='VolumeType', name='bar')
self.assertFalse(self.constraint.validate("bar", self.ctx))
class VolumeBackupConstraintTest(common.HeatTestCase):
def setUp(self):
super(VolumeBackupConstraintTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_get_volume_backup = mock.Mock()
self.ctx.clients.client_plugin(
'cinder').get_volume_backup = self.mock_get_volume_backup
self.constraint = cinder.VolumeBackupConstraint()
def test_validation(self):
self.mock_get_volume_backup.return_value = 'volume_backup'
self.assertTrue(self.constraint.validate("foo", self.ctx))
def test_validation_error(self):
ex = exception.EntityNotFound(entity='Volume backup', name='bar')
self.mock_get_volume_backup.side_effect = ex
self.assertFalse(self.constraint.validate("bar", self.ctx))
|
jasondunsmore/heat
|
heat/tests/clients/test_cinder_client.py
|
Python
|
apache-2.0
| 5,085
|
# -*-python-*-
#
# Copyright (C) 1999-2013 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
import sys
import time
import types
import re
import compat
import MySQLdb
# set to 1 to store commit times in UTC, or 0 to use the ViewVC machine's
# local timezone. Using UTC is recommended because it ensures that the
# database will remain valid even if it is moved to another machine or the host
# computer's time zone is changed. UTC also avoids the ambiguity associated
# with daylight saving time (for example if a computer in New York recorded the
# local time 2002/10/27 1:30 am, there would be no way to tell whether the
# actual time was recorded before or after clocks were rolled back). Use local
# times for compatibility with databases used by ViewCVS 0.92 and earlier
# versions.
utc_time = 1
def DateTimeFromTicks(ticks):
"""Return a MySQL DATETIME value from a unix timestamp"""
if utc_time:
t = time.gmtime(ticks)
else:
t = time.localtime(ticks)
return "%04d-%02d-%02d %02d:%02d:%02d" % t[:6]
_re_datetime = re.compile('([0-9]{4})-([0-9][0-9])-([0-9][0-9]) '
'([0-9][0-9]):([0-9][0-9]):([0-9][0-9])')
def TicksFromDateTime(datetime):
"""Return a unix timestamp from a MySQL DATETIME value"""
if type(datetime) == types.StringType:
# datetime is a MySQL DATETIME string
matches = _re_datetime.match(datetime).groups()
t = tuple(map(int, matches)) + (0, 0, 0)
elif hasattr(datetime, "timetuple"):
# datetime is a Python >=2.3 datetime.DateTime object
t = datetime.timetuple()
else:
# datetime is an eGenix mx.DateTime object
t = datetime.tuple()
if utc_time:
return compat.timegm(t)
else:
return time.mktime(t[:8] + (-1,))
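# Illustrative round trip (not part of the original module): with utc_time
# enabled, converting a timestamp to a DATETIME string and back should return
# the original value. The helper name is made up and is never called here.
def _example_roundtrip(ticks):
    datetime_str = DateTimeFromTicks(ticks)      # 'YYYY-MM-DD hh:mm:ss'
    return TicksFromDateTime(datetime_str) == ticks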
def connect(host, port, user, passwd, db):
return MySQLdb.connect(host=host, port=port, user=user, passwd=passwd, db=db)
|
marcellodesales/svnedge-console
|
svn-server/lib/viewvc/dbi.py
|
Python
|
agpl-3.0
| 2,184
|
from bucket.local import LocalProvider
import config
import statestore
import logging
import os
import threading
import traceback
import messages
from send2trash import send2trash
from worker import BaseWorker
class Download(BaseWorker):
def __init__(self, objectStore, outputQueue):
BaseWorker.__init__(self)
self.objectStore = objectStore
self.outputQueue = outputQueue
self.localStore = LocalProvider()
c = config.Config()
self.localSyncPath = c.get_home_folder()
self.tempDownloadFolder = c.get_temporary_folder()
self.state = statestore.StateStore(c.username)
self.lock = threading.Lock()
self.running = True
self.trashFolder = c.get_trash_folder()
def stop(self):
logging.info('Download::stop')
self.objectStore.stop()
self.running = False
def _get_working_message(self):
return messages.Status('Looking for files to download')
def perform(self):
# get the current directory
#logging.debug('Download::perform')
self.outputQueue.put(self._get_working_message())
files = self.objectStore.list_dir(None)
for f in files:
if not self.running:
break
#logging.debug('f.path = %r' % f.path)
if f.isFolder:
if f.name == self.trashFolder:
# we don't download the trash folder
continue
else:
skipChildren = self.download_folder(f)
                    # if we deleted a bunch of stuff - it might
                    # mean our files list is out of whack
                    # so let's rather just break out - and restart
                    # next time round
if skipChildren:
logging.info('break')
break
else:
self.download_file(f)
self.outputQueue.put(messages.Status('Local files up to date'))
def download_file(self, f):
localPath = self.get_local_path(f.path)
if not os.path.exists(localPath):
self._set_hadWorkToDo(True)
#logging.debug('does not exist: %s' % localPath)
if self.already_synced_file(f.path):
# if we've already downloaded this file,
# it means we have to delete it remotely!
logging.info('delete remote version of %s' % localPath)
self.delete_remote_file(f.path)
else:
# lets get the file
head, tail = os.path.split(localPath)
self.outputQueue.put(messages.Status('Downloading %s' % tail))
tmpFile = self.get_tmp_filename()
if os.path.exists(tmpFile):
# if a temporary file with the same name
# exists, delete it
os.remove(tmpFile)
self.objectStore.download_object(f.path, tmpFile)
os.rename(tmpFile, localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path, f.hash, localMD)
self.outputQueue.put(self._get_working_message())
else:
# the file already exists - do we overwrite it?
syncInfo = self.state.getObjectSyncInfo(f.path)
if syncInfo:
localMD = self.localStore.get_last_modified_date(localPath)
if syncInfo.dateModified != localMD:
# the dates differ! we need to calculate the hash!
localFileInfo = self.localStore.get_file_info(localPath)
if localFileInfo.hash != f.hash:
# hmm - ok, if the online one, has the same hash
# as I synced, then it means the local file
# has changed!
if syncInfo.hash == f.hash:
# online and synced have the same version!
# that means the local one has changed
# so we're not downloading anything
# the upload process should handle this
pass
else:
logging.warn('TODO: the files differ - which '
'one do I use?')
else:
# all good - the files are the same
# we can update our local sync info
self.state.markObjectAsSynced(f.path,
localFileInfo.hash,
localMD)
else:
# dates are the same, so we can assume the hash
# hasn't changed
if syncInfo.hash != f.hash:
# if the sync info is the same as the local file
# then it must mean the remote file has changed!
get_file_info = self.localStore.get_file_info
localFileInfo = get_file_info(localPath)
if localFileInfo.hash == syncInfo.hash:
self.replace_file(f, localPath)
else:
logging.info('remote hash: %r' % f.hash)
logging.info('local hash: %r' % localFileInfo.hash)
logging.info('sync hash: %r' % syncInfo.hash)
logging.warn('sync hash differs from local hash!')
else:
# sync hash is same as remote hash, and the file date
# hasn't changed. we assume this to mean, there have
# been no changes
pass
else:
# TODO: we need to do something here!
# the file exists locally, and remotely - but we don't have any
# record of having downloaded it
localFileInfo = self.localStore.get_file_info(localPath)
if localFileInfo.hash == f.hash:
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
localFileInfo.hash,
localMD)
else:
# we don't have any history of this file - and the hash
# from local differs from remote! WHAT DO WE DO!
logging.error('TODO: HASH differs! Which is which????: %r'
% f.path)
pass
def replace_file(self, f, localPath):
self._set_hadWorkToDo(True)
head, tail = os.path.split(localPath)
self.outputQueue.put(messages.Status('Downloading %s' % tail))
tmpFile = self.get_tmp_filename()
if os.path.exists(tmpFile):
# if a temporary file with the same name exists, remove it
os.remove(tmpFile)
self.objectStore.download_object(f.path, tmpFile)
send2trash(localPath)
os.rename(tmpFile, localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(f.path,
f.hash,
localMD)
self.outputQueue.put(self._get_working_message())
def get_tmp_filename(self):
return os.path.join(self.tempDownloadFolder, 'tmpfile')
def download_folder(self, folder):
if not self.running:
# return true, to indicate that children can be skipped
return True
# does the folder exist locally?
#logging.debug('download_folder(%s)' % folder.path)
localPath = self.get_local_path(folder.path)
downloadFolderContents = True
skipChildren = False
if not os.path.exists(localPath):
self._set_hadWorkToDo(True)
# the path exists online, but NOT locally
# we do one of two things, we either
# a) delete it remotely
# if we know for a fact we've already downloaded this folder,
# then it not being here, can only mean we've deleted it
# b) download it
# if we haven't marked this folder as being downloaded,
# then we get it now
if self.already_downloaded_folder(folder.path):
logging.info('we need to delete %r!' % folder.path)
self.delete_remote_folder(folder.path)
downloadFolderContents = False
skipChildren = True
logging.info('done deleting remote folder')
else:
#logging.info('creating: %r' % localPath)
os.makedirs(localPath)
localMD = self.localStore.get_last_modified_date(localPath)
self.state.markObjectAsSynced(folder.path,
None,
localMD)
#logging.info('done creating %r' % localPath)
if downloadFolderContents:
try:
                #logging.debug('downloading folder contents for %s' % folder.path)
files = self.objectStore.list_dir(folder.path)
#logging.debug('got %r files' % len(files))
for f in files:
if folder.path.strip('/') != f.path.strip('/'):
if f.isFolder:
skipChildren = self.download_folder(f)
if skipChildren:
break
else:
self.download_file(f)
            except Exception:
                logging.error('failed to download %s' % folder.path)
                logging.error(traceback.format_exc())
return skipChildren
def get_local_path(self, remote_path):
return os.path.join(self.localSyncPath, remote_path)
def already_downloaded_folder(self, path):
""" Establish if this folder was downloaded before
        Typical use: the folder doesn't exist locally, but it does
        exist remotely. If we had already downloaded it, it can only
        be missing because it was deleted locally, so we delete it
        remotely as well.
"""
alreadySynced = False
syncInfo = self.state.getObjectSyncInfo(path)
if syncInfo:
            # if we have sync info for this path, it means we've
            # already downloaded or uploaded it
logging.info('we have sync info for %s' % path)
alreadySynced = True
else:
# if we don't have sync info for this path
# - it means we haven't downloaded it yet
#logging.info('no sync info for %s' % path)
pass
return alreadySynced
def already_synced_file(self, path):
""" See: already_downloaded_folder
"""
syncInfo = self.state.getObjectSyncInfo(path)
if syncInfo:
remoteFileInfo = self.objectStore.get_file_info(path)
if remoteFileInfo.hash == syncInfo.hash:
                # the hash of the file we synced is the same as the
                # one online, so we've already synced this file
return True
return False
else:
return False
def delete_remote_folder(self, path):
logging.info('delete_remote_folder(path = %r)' % path)
# a folder has children - and we need to remove those!
self._set_hadWorkToDo(True)
children = self.objectStore.list_dir(path)
#for child in children:
# logging.info('%s [child] %s' % (path, child.path))
for child in children:
if child.isFolder:
# remove this child folder
self.delete_remote_folder(child.path)
else:
# remove this child file
self.delete_remote_file(child.path)
logging.info('going to attempt to delete: %r' % path)
self.delete_remote_file(path)
def delete_remote_file(self, path):
self._set_hadWorkToDo(True)
logging.info('delete remote file: %s' % path)
head, tail = os.path.split(path)
self.outputQueue.put(messages.Status('Deleting %s' % tail))
self.objectStore.delete_object(path, moveToTrash=True)
self.state.removeObjectSyncRecord(path)
self.outputQueue.put(self._get_working_message())
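# --- Illustrative sketch (added; not part of the original module) ---
# download_file above decides what to do by comparing three hashes: the
# remote hash, the current local hash, and the hash recorded at the last
# sync. A minimal standalone sketch of that decision table; the function
# name decide_sync_action is hypothetical.
def decide_sync_action(remote_hash, local_hash, synced_hash):
    """Return which side changed since the last successful sync."""
    if local_hash == remote_hash:
        return 'in-sync'        # nothing to do; just refresh the sync record
    if synced_hash == remote_hash:
        return 'local-changed'  # the upload pass should handle this
    if synced_hash == local_hash:
        return 'remote-changed' # safe to download and replace the local file
    return 'conflict'           # both sides changed; needs a resolution policy
# e.g. decide_sync_action('a1', 'b2', 'a1') == 'local-changed'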
|
Sybrand/digital-panda
|
panda-tray/download.py
|
Python
|
mit
| 12,881
|
#!/usr/bin/python2
"""
A loopback bot example using LibTeletap
The following code waits for users to send messages.
Upon receiving them, the program will send the same text message back to the sender.
In case of group messages, the reply is sent back to the original sender (echoing to the whole group is left commented out in the handler).
http://github.com/rohit-h/teletap
"""
from teletap import libteletap
import time
def user_responder(action, user, message):
print ' >>> Incoming message from {0} : {1}'.format(user.user_name, message)
print ' <<< Outgoing message for {0} : {1}'.format(user.user_name, message)
action.send_typing(user); time.sleep(2)
action.send_message(user, message)
def group_responder(action, group, user, message):
print ' >>> Incoming message from {0} on {1} : {2}'.format(user.user_name, group.group_id, message)
action.send_message(user, message) # Send to user
#action.send_message(group, message) # Send to group [Use responsibly!]
if __name__ == '__main__':
CLIENT = libteletap.Teletap(binary='/usr/bin/telegram-cli', keyfile='/etc/telegram-cli/server.pub', logs='pingbot.log', quiet=False)
CLIENT.attach_user_responder(user_responder) # Attach message handler for user chats
CLIENT.attach_group_responder(group_responder) # Attach message handler for group chats
try:
CLIENT.begin() # Connect & start main event loop
except KeyboardInterrupt:
CLIENT.shutdown() # Gracefully terminate client and daemon
|
rohit-h/teletap
|
examples/loopback.py
|
Python
|
gpl-2.0
| 1,498
|
#!/usr/bin/env python3
"""Example that shows how to easily save a variable number of atoms with a
VLArray."""
import numpy as np
import tables as tb
N = 100
shape = (3, 3)
np.random.seed(10) # For reproductible results
f = tb.open_file("vlarray4.h5", mode="w")
vlarray = f.create_vlarray(f.root, 'vlarray1',
tb.Float64Atom(shape=shape),
"ragged array of arrays")
k = 0
for i in range(N):
l = []
for j in range(np.random.randint(N)):
l.append(np.random.randn(*shape))
k += 1
vlarray.append(l)
print("Total number of atoms:", k)
f.close()
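# --- Illustrative read-back sketch (added; not part of the original) ---
# Assuming the file written above, each row of the VLArray comes back as a
# numpy array of shape (n_i, 3, 3), so the atom count can be recovered:
f = tb.open_file("vlarray4.h5", mode="r")
vlarray = f.root.vlarray1
total = sum(len(row) for row in vlarray)
print("Total number of atoms read back:", total)
f.close()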
|
avalentino/PyTables
|
examples/vlarray4.py
|
Python
|
bsd-3-clause
| 626
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class DodanePl(DeadHoster):
__name__ = "DodanePl"
__type__ = "hoster"
__version__ = "0.03"
__pattern__ = r'http://(?:www\.)?dodane\.pl/file/\d+'
__description__ = """Dodane.pl hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("z00nx", "z00nx0@gmail.com")]
getInfo = create_getInfo(DodanePl)
|
sebdelsol/pyload
|
module/plugins/hoster/DodanePl.py
|
Python
|
gpl-3.0
| 442
|
import osgtest.library.core as core
import osgtest.library.files as files
import osgtest.library.osgunittest as osgunittest
class TestLcMaps(osgunittest.OSGTestCase):
required_rpms = ['lcmaps', 'lcmaps-db-templates', 'vo-client', 'vo-client-lcmaps-voms']
def test_01_configure(self):
core.config['lcmaps.db'] = '/etc/lcmaps.db'
core.config['lcmaps.gsi-authz'] = '/etc/grid-security/gsi-authz.conf'
core.skip_ok_unless_installed(*self.required_rpms)
template = files.read('/usr/share/lcmaps/templates/lcmaps.db.vomsmap',
as_single_string=True)
files.write(core.config['lcmaps.db'], template, owner='lcmaps')
files.write(core.config['lcmaps.gsi-authz'],
"globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n",
owner='lcmaps')
def test_02_old_xrootd_policy(self):
core.skip_ok_unless_installed('xrootd-lcmaps', *self.required_rpms)
self.skip_ok_if(core.PackageVersion('xrootd-lcmaps') >= '1.4.0')
files.append(core.config['lcmaps.db'],
'''xrootd_policy:
verifyproxynokey -> banfile
banfile -> banvomsfile | bad
banvomsfile -> gridmapfile | bad
gridmapfile -> good | vomsmapfile
vomsmapfile -> good | defaultmapfile
defaultmapfile -> good | bad
''',
backup=False)
|
efajardo/osg-test
|
osgtest/tests/test_140_lcmaps.py
|
Python
|
apache-2.0
| 1,373
|
# encoding: utf8
from django.db import models, migrations
import oluch.models
class Migration(migrations.Migration):
dependencies = [
('oluch', '0014_contest_statement_file'),
]
operations = [
migrations.AddField(
model_name='contest',
name='criteria_file',
field=models.FileField(upload_to=oluch.models.criteria_filepath, null=True, verbose_name='Criteria', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='contest',
name='statement_file',
field=models.FileField(upload_to=oluch.models.solutions_filepath, null=True, verbose_name='Statements', blank=True),
),
]
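# --- Illustrative sketch (added; not part of the original migration) ---
# The FileField above uses oluch.models.criteria_filepath as its upload_to
# callable. Django calls such a callable with (instance, filename); a
# hypothetical minimal version could look like the following (the real
# implementation in oluch.models may differ):
#
#     def criteria_filepath(instance, filename):
#         return 'criteria/contest_{0}/{1}'.format(instance.pk, filename)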
|
gurovic/oluch2
|
oluch/migrations/0015_auto_20140214_2153.py
|
Python
|
gpl-2.0
| 736
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See also: http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/tkFileDialog.html
# http://tkinter.unpythonic.net/wiki/tkFileDialog
# Intended for cases where the user wants to select an existing directory.
# With mustexist=True, the dialog only lets the user choose a directory that
# actually exists.
import os
import tkinter as tk
import tkinter.filedialog
HOME = os.path.expanduser("~")
root = tk.Tk()
def open_file():
path = tk.filedialog.askdirectory(parent=root,
initialdir=HOME, # optional
mustexist=True, # optional
title='Select your directory') # optional
print("PATH:", path)
open_button = tk.Button(root, text="Select a directory", command=open_file)
open_button.pack()
tk.mainloop()
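# --- Illustrative sketch (added; not part of the original snippet) ---
# The file-selection counterpart of the dialog above uses askopenfilename
# instead of askdirectory, with the same optional arguments:
#
#     def open_file():
#         path = tk.filedialog.askopenfilename(parent=root,
#                                              initialdir=HOME,
#                                              title='Select a file')
#         print("PATH:", path)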
|
jeremiedecock/snippets
|
python/tkinter/python3/file_dialog_open_directory.py
|
Python
|
mit
| 2,048
|
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from kolibri.core.auth.management.utils import confirm_or_exit
from kolibri.core.auth.models import FacilityUser
class Command(BaseCommand):
help = "To allow administrators to comply with GDPR requests, this command initiates the deletion process for a user."
def add_arguments(self, parser):
parser.add_argument(
"username", action="store", type=str, help="Username of user to delete"
)
parser.add_argument(
"--facility",
action="store",
type=str,
help="Facility ID that user is associated with",
)
def handle(self, *args, **options):
try:
if options["facility"]:
user = FacilityUser.objects.get(
username=options["username"], facility_id=options["facility"]
)
else:
user = FacilityUser.objects.get(username=options["username"])
except FacilityUser.DoesNotExist:
raise CommandError(
"User with username `{username}` does not exist.".format(
username=options["username"]
)
)
except FacilityUser.MultipleObjectsReturned:
raise CommandError(
(
"There is more than one user on this device with the username `{username}`. "
"Please specify the facility ID for this user.".format(
username=options["username"]
)
)
)
# ensure the user REALLY wants to do this!
confirm_or_exit(
"Are you sure you wish to permanently delete this user? This will DELETE ALL DATA FOR THIS USER."
)
confirm_or_exit(
"ARE YOU SURE? If you do this, there is no way to recover the user data on this device."
)
print(
"Proceeding with user deletion. Deleting all data for user <{}>".format(
options["username"]
)
)
user.delete(hard_delete=True)
print("Deletion complete. All data for this user has been deleted.")
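# --- Illustrative usage (added; not part of the original command) ---
# As a standard Django management command this is run through the project's
# management entry point; the exact invocation depends on the deployment,
# but typically looks like one of:
#
#     kolibri manage deleteuser <username> --facility <facility_id>
#     python manage.py deleteuser <username> --facility <facility_id>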
|
learningequality/kolibri
|
kolibri/core/auth/management/commands/deleteuser.py
|
Python
|
mit
| 2,266
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import invoice
from openerp.report import *
|
Jgarcia-IAS/SAT
|
openerp/addons-extra/l10n_co_account/__init__.py
|
Python
|
agpl-3.0
| 1,428
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from core.lib.linkedin.token import Reaction
class ReactionCopy(Reaction):
def __init__(self, *args, **kwargs):
"""docstring for __init__"""
super(ReactionCopy, self).__init__()
|
vsilent/smarty-bot
|
core/brain/update/linkedin/token/reaction.py
|
Python
|
mit
| 247
|
import struct
from .mm.addr import *
class Proxy:
def __init__(self, obj):
self.__obj = obj
def __getattr__(self, name):
return getattr(self.__obj, name)
class String(Proxy):
pass
class Integer(Proxy):
pass
class RawDump:
instance = None
def __init__(self, file):
f = open(file, "rb")
self.mem = f.read()
f.close()
RawDump.instance = self
@classmethod
def setFile(cls, file):
cls(file)
@classmethod
def getInstance(cls):
if cls.instance is None:
            raise Exception(cls.__name__ + " has not been instantiated!")
return cls.instance
@staticmethod
def little_endian(dword_string):
return struct.unpack("=L", dword_string)[0]
@staticmethod
def little_endian_qword(qword_string):
return struct.unpack("=Q", qword_string)[0]
def read_dword(self, addr):
if isinstance(addr, Address):
addr = addr.pa()
try:
return Integer(RawDump.little_endian(self.mem[addr : addr+4]))
except Exception as e:
raise Exception("RawDump: Unable to read physical memory at offset %x" %addr)
def read_qword(self, addr):
if isinstance(addr, Address):
addr = addr.pa()
try:
return Integer(RawDump.little_endian_qword(self.mem[addr : addr+8]))
except Exception as e:
raise Exception("RawDump: Unable to read physical memory at offset %x" %addr)
def read_string(self, addr, maxlen=100):
if isinstance(addr, Address):
addr = addr.pa()
string = ""
for i in range(addr, addr + maxlen):
if self.mem[i] != "\x00":
string += self.mem[i]
else:
break
return String(string)
def read_bytes(self, addr_begin, l):
# Get physical address
if isinstance(addr_begin, Address):
addr_begin = addr_begin.pa()
return self.mem[addr_begin : addr_begin+l]
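# --- Illustrative usage sketch (added; not part of the original module) ---
# Reading values out of a raw physical-memory dump; the file name and the
# offsets below are placeholders:
#
#     dump = RawDump("memory.raw")       # also stored as RawDump.instance
#     value = dump.read_dword(0x1000)    # Integer proxy of a 32-bit LE word
#     text = dump.read_string(0x2000)    # NUL-terminated string, max 100 bytes
#     raw = dump.read_bytes(0x3000, 16)  # 16 raw bytes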
|
byeoksan/MemScope
|
memscope/volatilitux_android_x86/core/raw_dump.py
|
Python
|
gpl-2.0
| 1,914
|
import theano.tensor as tt
from theano import function
|
Delosari/dazer
|
bin/lib/Math_Libraries/functions_for_theano.py
|
Python
|
mit
| 55
|
from django.shortcuts import render
# Create your views here.
def about(request):
return render(request, 'sitepages/about.html')
|
bestafubana/blogn
|
blogn/blogn/sitepages/views.py
|
Python
|
gpl-3.0
| 133
|
artist_answer = input("Search for an artist")
import requests
response = requests.get('https://api.spotify.com/v1/search?q='+artist_answer+'&type=artist&market=US&limit=50')
#https://api.spotify.com/v1/search?type=artist
artist = response.json()
#print(artist.keys())
#print(artist['artists'].keys())
#print(type(artist['artists']['items']))
artist_count = 0
artist_dictionary = {}  # maps the listed number to that artist's name and id
for artist in artist['artists']['items']:
    artist_count = artist_count + 1
    artist_dictionary[artist_count] = {'name': artist['name'], 'id': artist['id']}
    print(artist['name'], artist_count)
selection = int(input("Get more info about one of these artists by inputting its number"))
if selection in artist_dictionary:
    selected_name = artist_dictionary[selection]['name']
    selected_id = artist_dictionary[selection]['id']
    response = requests.get('https://api.spotify.com/v1/artists/' + selected_id + '/top-tracks?country=US')
    artist_toptracks = response.json()
    top_tracks = artist_toptracks['tracks']
    for tracks in top_tracks:
        print("These are", selected_name, "top tracks:", tracks['name'])
else:
    print("That number is not in the list.")
|
mercybenzaquen/foundations-homework
|
foundations_hw/benzaquen_search_engine.py
|
Python
|
mit
| 1,470
|
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.api import urlfetch
import urllib
import logging
import json
#db table for pattern
class DbNews(ndb.Model):
pair = ndb.StringProperty()
pr = ndb.StringProperty()
alexa = ndb.StringProperty()
positive = ndb.IntegerProperty()
negative = ndb.IntegerProperty()
raatio = ndb.IntegerProperty()
def search(q, pair):
    type = 'all+types'  # default search type
    url = 'http://pipes.yahoo.com/pipes/pipe.run?_id=16f66113a9aa9c39dd93df8806949159&_render=json&filterinput=' + type + '&maininput=' + q
    urlfetch.set_default_fetch_deadline(45)
    result = urlfetch.fetch(url)
    if result.status_code == 200:
        resJSON = json.loads(result.content)
        items = resJSON['value']['items']
        cnt = 0
        pos = 0
        neg = 0
        for i in items:
            link = i['link']
            title = i['title']
            url_link = 'http://dotfornote.appspot.com/?url=' + link + '&wrds=1'
            urlfetch.set_default_fetch_deadline(45)
            result = urlfetch.fetch(url_link)
            if result.status_code == 200:
                resItems = json.loads(result.content)
                pos += int(resItems['positive'])
                neg += int(resItems['negative'])
                cnt += 1
        if cnt == 0:
            return '<p> No result for this request <br/>'
        return '<p> pos = [' + str(pos / cnt) + '] Neg [' + str(neg / cnt) + ']<br />'
    else:
        return '<p> No result for this request <br/>'
|
alibaba0507/fx-monitor
|
News.py
|
Python
|
gpl-2.0
| 1,355
|
#
# Adapted from https://github.com/petewarden/pyparallelcurl/blob/master/pyparallelcurl.py
#
# This class is designed to make it easy to run multiple curl requests in parallel, rather than
# waiting for each one to finish before starting the next. Under the hood it uses curl_multi_exec
# but since I find that interface painfully confusing, I wanted one that corresponded to the tasks
# that I wanted to run.
#
# To use it, first create the ParallelCurl object:
#
# parallel_curl = ParallelCurl(10);
#
# The first argument to the constructor is the maximum number of outstanding fetches to allow
# before blocking to wait for one to finish. You can change this later using setMaxRequests()
# The second optional argument is an array of curl options in the format used by curl_setopt_array()
#
# Next, start a URL fetch:
#
# parallel_curl.startrequest('http://example.com', onrequestdone, {'somekey': 'somevalue'})
#
# The first argument is the address that should be fetched
# The second is the callback function that will be run once the request is done
# The third is a 'cookie', that can contain arbitrary data to be passed to the callback
#
# This startrequest call will return immediately, as long as less than the maximum number of
# requests are outstanding. Once the request is done, the callback function will be called, eg:
#
# onrequestdone(content, 'http://example.com', ch, {'somekey': 'somevalue'})
#
# The callback should take four arguments. The first is a string containing the content found at
# the URL. The second is the original URL requested, the third is the curl handle of the request that
# can be queried to get the results, and the fourth is the arbitrary 'cookie' value that you
# associated with this object. This cookie contains user-defined data.
#
# By Pete Warden <pete@petewarden.com>, freely reusable, see http://petewarden.typepad.com for more
import sys
import pycurl
import cStringIO
import time
# Pete- Not quite sure what this is all about, but seems important, so copied from
# retriever-multi.py :)
#
# We should ignore SIGPIPE when using pycurl.NOSIGNAL - see
# the libcurl tutorial for more info.
try:
import signal
from signal import SIGPIPE, SIG_IGN
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
pass
class ParallelCurl:
max_requests = 10
options = {}
outstanding_requests = {}
multi_handle = None
def __init__(self, in_max_requests = 10, in_options = {}):
self.max_requests = in_max_requests
self.options = in_options
self.outstanding_requests = {}
self.multi_handle = pycurl.CurlMulti()
# Ensure all the requests finish nicely
def __del__(self):
print 'self.max_requests='+str(self.max_requests)
self.finishallrequests()
# Sets how many requests can be outstanding at once before we block and wait for one to
# finish before starting the next one
def setmaxrequests(self, in_max_requests):
self.max_requests = in_max_requests
# Sets the options to pass to curl, using the format of curl_setopt_array()
def setoptions(self, in_options):
self.options = in_options
# Start a fetch from the 'url' address, calling the 'callback' function passing the optional
# 'user_data' value. The callback should accept 3 arguments, the url, curl handle and user
# data, eg on_request_done(url, ch, user_data). force_get is to make the custom request
# GET instead of POST
def startrequest(self, url, callback, user_data = {}, post_fields=None, force_get=False):
if self.max_requests > 0:
self.waitforoutstandingrequeststodropbelow(self.max_requests)
ch = pycurl.Curl()
for option, value in self.options.items():
ch.setopt(option, value)
ch.setopt(pycurl.URL, url)
result_buffer = cStringIO.StringIO()
ch.setopt(pycurl.WRITEFUNCTION, result_buffer.write)
if post_fields is not None:
if not force_get:
ch.setopt(pycurl.POST, True)
else:
ch.setopt(pycurl.CUSTOMREQUEST, 'GET')
ch.setopt(pycurl.POSTFIELDS, post_fields)
self.multi_handle.add_handle(ch)
self.outstanding_requests[ch] = {
'handle': ch,
'result_buffer': result_buffer,
'url': url,
'callback': callback,
'user_data':user_data
}
self.checkforcompletedrequests()
# You *MUST* call this function at the end of your script. It waits for any running requests
# to complete, and calls their callback functions
def finishallrequests(self):
self.waitforoutstandingrequeststodropbelow(1)
# Checks to see if any of the outstanding requests have finished
def checkforcompletedrequests(self):
# Call select to see if anything is waiting for us
if self.multi_handle.select(1.0) == -1:
return;
# Since something's waiting, give curl a chance to process it
while True:
ret, num_handles = self.multi_handle.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
# Now grab the information about the completed requests
while True:
num_q, ok_list, err_list = self.multi_handle.info_read()
for ch in ok_list:
if ch not in self.outstanding_requests:
raise RuntimeError("Error - handle wasn't found in requests: '"+str(ch)+"' in "
+str(self.outstanding_requests))
request = self.outstanding_requests[ch]
url = request['url']
content = request['result_buffer'].getvalue()
callback = request['callback']
user_data = request['user_data']
callback(content, url, ch, user_data)
self.multi_handle.remove_handle(ch)
del self.outstanding_requests[ch]
for ch, errno, errmsg in err_list:
if ch not in self.outstanding_requests:
raise RuntimeError("Error - handle wasn't found in requests: '"+str(ch)+"' in "
+str(self.outstanding_requests))
request = self.outstanding_requests[ch]
url = request['url']
content = None
callback = request['callback']
user_data = request['user_data']
callback(content, url, ch, user_data)
self.multi_handle.remove_handle(ch)
del self.outstanding_requests[ch]
if num_q < 1:
break
# Blocks until there's less than the specified number of requests outstanding
def waitforoutstandingrequeststodropbelow(self, max):
while True:
self.checkforcompletedrequests()
if len(self.outstanding_requests) < max:
break
time.sleep(0.01)
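# --- Illustrative usage sketch (added; not part of the original module) ---
# Tying together the calls described in the header comment; the URLs and the
# option values are placeholders:
#
#     def on_request_done(content, url, ch, user_data):
#         print 'fetched %s (%d bytes)' % (url, len(content or ''))
#
#     parallel_curl = ParallelCurl(10, {pycurl.TIMEOUT: 30})
#     for url in ['http://example.com/a', 'http://example.com/b']:
#         parallel_curl.startrequest(url, on_request_done, {'tag': url})
#     parallel_curl.finishallrequests()  # must be called before exiting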
|
zynga/zperfmon
|
server/db_cron/parallelcurl.py
|
Python
|
apache-2.0
| 7,312
|
BROKER_URL = 'amqp://couchbase:couchbase@172.23.97.73:5672/broker'
BROKER_POOL_LIMIT = None
CELERY_RESULT_BACKEND = 'amqp'
CELERY_RESULT_EXCHANGE = 'perf_results'
CELERY_RESULT_PERSISTENT = False
CELERYD_HIJACK_ROOT_LOGGER = False
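# --- Illustrative usage sketch (added; not part of the original config) ---
# A Celery app would typically pick up these settings with
# config_from_object; the module path below assumes this file's location
# in the repository:
#
#     from celery import Celery
#     app = Celery('perfrunner')
#     app.config_from_object('perfrunner.celeryremote')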
|
pavel-paulau/perfrunner
|
perfrunner/celeryremote.py
|
Python
|
apache-2.0
| 231
|
#
# A script to find functional loci in cancer genes, using
# reconstructed ancestors of kinase genes and a library
# of mutations obeserved in many real cancer strains.
#
# Victor Hanson-Smith
# 2014
# victorhansonsmith@gmail.com
#
import os, sys, re
from argParser import *
ap = ArgParser(sys.argv)
msa_path = "../2014-Feb25/msaprobs/cmgc.msaprobs.phylip"
seed_cbiopath = {"Homo.sapiens.CDK1":"cBioPortal/cdk1.txt",
"Homo.sapiens.CDK2":"cBioPortal/cdk2.txt",
"Homo.sapiens.CDK4":"cBioPortal/cdk4.txt",
"Homo.sapiens.CDK6":"cBioPortal/cdk6.txt",
"Homo.sapiens.MAPK1":"cBioPortal/mapk1.txt",
"Homo.sapiens.MAPK3":"cBioPortal/mapk3.txt",
"Homo.sapiens.MAP2K1":"cBioPortal/map2k1.txt",
"Homo.sapiens.MAP2k2":"cBioPortal/map2k2.txt"}
branch_dirs = [] # a list of directories, one directory for each branch, containing mutation information
for d in os.listdir("../2014-Feb25"):
if d.startswith("Anc") and d.__contains__("to"):
branch_dirs.append("../2014-Feb25/" + d)
from string import uppercase
def col_idx2str(idx):
idx += 1
col = ''
while (idx > 0):
mod = (idx - 1) % 26
col = uppercase[mod] + col
idx = (idx - mod) / 26
return col
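# Illustrative note (added): col_idx2str converts a 0-based column index to
# an Excel-style column letter, which is later used to build the
# HYPGEOMDIST formulas, e.g.
#   col_idx2str(0) -> 'A', col_idx2str(25) -> 'Z',
#   col_idx2str(26) -> 'AA', col_idx2str(27) -> 'AB'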
def build_site_map(mpath):
"""Returns (ref2seed, seed2ref)"""
"""mpath is the filepath to a PHYLIP aligned file."""
if False == os.path.exists(mpath):
        print "\n. Error: I can't find your MSA file at", mpath
exit()
seed2ref = {} # key = taxa name, value = hash; key = site, value = site
ref2seed = {}
fin = open(mpath, "r")
line1 = fin.readline()
nsites = int( line1.split()[1] )
for l in fin.xreadlines():
tokens = l.split()
this_taxa = tokens[0]
this_seq = tokens[1]
seed2ref[this_taxa] = {}
ref2seed[this_taxa] = {}
jj = -1
for ii in range(0, nsites):
c = this_seq[ii]
if c == "-":
continue
else:
jj += 1
ref2seed[this_taxa][ii] = jj
seed2ref[this_taxa][jj] = ii
return (ref2seed, seed2ref)
def read_cbio_file(cpath):
if False == os.path.exists(cpath):
print "\n. Error, I cannot find your cBioPortal data file at", cpath
exit()
mu_data = {} # key = site
fin = open(cpath, "r")
for l in fin.xreadlines():
if l.__len__() <= 1:
continue
tokens = l.split()
if tokens.__len__() <= 8:
continue
mu_name = tokens[0]
type_ii = None
for tt in range(0, tokens.__len__()):
if tokens[tt] == "Missense" or tokens[tt] == "Nonsense":
type_ii = tt
if type_ii == None:
#print tokens
continue
next = 1
study = ""
mu_ii = None
if tokens[type_ii-1] == "3D":
study = " ".join( tokens[next:type_ii-2] )
mu_ii = type_ii-2
else:
study = " ".join( tokens[next:type_ii-1] )
mu_ii = type_ii - 1
mu = tokens[mu_ii]
fromstate = mu[0]
tostate = mu[ mu.__len__()-1 ]
#print fromstate, tostate, l
site = None
if mu.__len__() == 3:
site = int( mu[1] )
else:
site = int( mu[1:mu.__len__()-1] )
type = tokens[type_ii]
next = type_ii + 1
copy = tokens[next]
next += 1
if tokens[next] != "U" and tokens[next] != "V":
next += 2
else:
next += 1
#print l
#print tokens
#print next, fromstate, tostate, site, type, copy
#print tokens[next]
if tokens.__len__() - next == 3:
next += 1 # skip mutation assessor
allele_freq = tokens[next]
next += 1
n_in_sample = tokens[next]
mu_data[mu_name] = (study, fromstate, tostate, site, type, allele_freq, n_in_sample)
return mu_data
def collapse_mu_data(mu_data):
site_mu_data = {} # key = site, value = mu_data hashes corresponding to that site
for muname in mu_data:
site = mu_data[muname][3]
if site not in site_mu_data:
site_mu_data[site] = {}
site_mu_data[site][muname] = mu_data[muname]
return site_mu_data
def parse_df(dpath):
if False == os.path.exists(dpath):
print "\n. Error, I cannot find your Df path at", dpath
exit()
key_model = "msaprobs.PROTCATLG"
refsite_df = {}
refsite_fromstate = {}
refsite_frompp = {}
refsite_tostate = {}
refsite_topp = {}
fin = open(dpath, "r")
last_df = None
last_refsite = None
keyc = 0
for l in fin.xreadlines():
if l.startswith("-->"):
tokens = l.split()
last_df = float(tokens[3])
last_ref = None
last_refsite = None
elif l.startswith( key_model ):
#print key_model, l
tokens = l.split()
last_refsite = int( tokens[2] )
refsite_df[last_refsite] = last_df
keyc = 2
elif keyc == 2:
#print 2, l
keyc = 1
tokens = l.split()
refsite_fromstate[ last_refsite ] = tokens[3]
refsite_frompp[ last_refsite ] = tokens[4]
elif keyc == 1:
#print 1, l
keyc = 0
tokens = l.split()
refsite_tostate[ last_refsite ] = tokens[3]
refsite_topp[ last_refsite ] = tokens[4]
#print "174:", refsite_topp[ last_refsite ], refsite_frompp[ last_refsite ]
return (refsite_df, refsite_fromstate, refsite_frompp, refsite_tostate, refsite_topp)
(ref2seed, seed2ref) = build_site_map(msa_path)
def get_branch_data(branchname):
"""x = (refsite_df, refsite_fromstate, refsite_frompp, refsite_tostate, refsite_topp)"""
x = parse_df( branchname + "/Df.details.txt")
return x
def get_branch_musites(branchname, ppthresh = 0.6):
"""x = (refsite_df, refsite_fromstate, refsite_frompp, refsite_tostate, refsite_topp)"""
x = parse_df( branchname + "/Df.details.txt")
branch_musites[branchname] = []
for site in x[0]:
if x[1][site] != x[3][site]:
            if float( x[2][site] ) > ppthresh or float( x[4][site] ) > ppthresh:
branch_musites[branchname].append( site )
return branch_musites
#
# branch_data
#
branch_data = {}
branch_musites = {}
for branchname in branch_dirs:
branch_data[branchname] = get_branch_data(branchname)
branch_musites = get_branch_musites(branchname)
branches_sorted = branch_data.keys()
branches_sorted.sort()
for branch in branch_musites:
print branch, branch_musites[branch].__len__()
#
# Excel styles:
#
from xlwt import Workbook, easyxf, Borders, Formula, XFStyle
hit_style1 = easyxf('pattern: pattern solid, fore_colour orange;')
hit_style2 = easyxf('pattern: pattern solid, fore_colour yellow;')
hit_style3 = easyxf('pattern: pattern solid, fore_colour yellow;')
stripe_style1 = easyxf('pattern: pattern solid, fore_colour white;')
stripe_style2 = easyxf('pattern: pattern solid, fore_colour white;')
header_style = easyxf('font: bold True;')
wrap_style = easyxf('align: wrap 1;')
center_style = easyxf('alignment: horizontal center;')
stats_style = XFStyle()
stats_style.num_format_str = "0.0000"
#
#
#
book = Workbook()
for seed in seed_cbiopath:
if seed not in ref2seed:
print "\n. I can't find the taxa", seed, "in your MSA. I'm skipping it!"
continue
mu_data = read_cbio_file( seed_cbiopath[seed] )
if mu_data.__len__() <= 0:
print "\n. I didn't find any data in", seed_cbiopath[seed]
site_mu_data = collapse_mu_data( mu_data )
branch_counthits = {}
for branch in branches_sorted:
branch_counthits[branch] = 0
branch_countvalidsites = {} # count the number of cBio sites that actually existed on this branch
for branch in branches_sorted:
branch_countvalidsites[branch] = 0
"""Open an Excel file and write data as we gather it."""
sheet1 = book.add_sheet(seed)
sheet1.write(0,0,"Site (cBioPortal)", header_style)
sheet1.col(0).width = 3700
sheet1.write(0,1,"Observed mutations (cBioPortal)", header_style)
sheet1.col(1).width = 7000
sheet1.write(0,2,"site (MSAProbs)", header_style)
sheet1.col(2).width = 3800
col = 3
for branch in branches_sorted:
btok = branch.split("/")
branch_short = btok[ btok.__len__()-1 ]
sheet1.write(0,col,branch_short, header_style)
sheet1.col(col).width = 6000
col += 1
sheet1.write(0,col,"N hits", header_style)
row = 1
col = None
"""One site per row."""
for site in site_mu_data:
count_mu_branches = 0 # count the number of branches that have a mutation at this site.
found_mu_for_row = False
found_convergent_mu_for_row = False
sheet1.write(row,0,site,center_style)
fromaas = []
toaas = []
mutations = []
for muname in site_mu_data[site]:
data = mu_data[muname]
fromaa = data[1]
toaa = data[2]
if fromaa not in fromaas:
fromaas.append( fromaa )
if toaa not in toaas:
toaas.append( toaa )
mutations.append( fromaa + "->" + toaa)
sheet1.write(row,1, ", ".join( mutations ), wrap_style)
"""site, to state, from state"""
refsite = seed2ref[seed][ site_mu_data[site][ site_mu_data[site].keys()[0] ][3] - 1]
sheet1.write(row, 2, refsite+1, center_style)
col = 2
branch_count = 0
for branch in branches_sorted:
branch_count += 1
if refsite+1 in branch_data[branch][0]:
branch_countvalidsites[branch] += 1
this_df = branch_data[branch][0][refsite+1]
from_state = branch_data[branch][1][refsite+1]
from_pp = branch_data[branch][2][refsite+1]
to_state = branch_data[branch][3][refsite+1]
to_pp = branch_data[branch][4][refsite+1]
if from_state == "-":
from_pp = "n/a"
from_state = "-"
if to_state == "-":
to_pp = "n/a"
to_state = "-"
"""Reversion?"""
if to_state in fromaas and from_state != to_state:
found_convergent_mu_for_row = True
count_mu_branches += 1
branch_counthits[branch] += 1
col += 1
sheet1.write(row, col, from_state + "(" + from_pp + ") -> " + to_state + "(" + to_pp + ")", hit_style1)
elif (from_state != to_state) and to_pp != "n/a" and ( float(to_pp) > 0.6 ):
found_mu_for_row = True
count_mu_branches += 1
branch_counthits[branch] += 1
col += 1
sheet1.write(row, col, from_state + "(" + from_pp + ") -> " + to_state + "(" + to_pp + ")", hit_style2)
else:
st = stripe_style2
if branch_count%2 == 0:
st = stripe_style1
col += 1
sheet1.write(row, col, from_state + "(" + from_pp + ") -> " + to_state + "(" + to_pp + ")", st)
else:
st = stripe_style2
if branch_count%2 == 0:
st = stripe_style1
col += 1
sheet1.write(row,col, "NA", st)
if count_mu_branches > 0:
sheet1.write(row,col+1, count_mu_branches, center_style)
row += 1
row += 1
col = 2
sheet1.write(row, col, "hits:")
for branch in branches_sorted:
col += 1
sheet1.write(row,col, branch_counthits[branch], st)
row += 1
col = 2
sheet1.write(row, col, "hit max possible:", wrap_style)
for branch in branches_sorted:
col += 1
sheet1.write(row,col, branch_countvalidsites[branch], st)
row += 1
col = 2
sheet1.write(row, col, "Count mu on branch:", wrap_style)
for branch in branches_sorted:
col += 1
sheet1.write(row,col, branch_musites[branch].__len__(), st)
row += 1
col = 2
sheet1.write(row, col, "Count sites in ancestors:", wrap_style)
for branch in branches_sorted:
col += 1
sheet1.write(row,col, branch_data[branch][0].keys().__len__(), st)
row += 1
col = 2
sheet1.write(row, col, "Hypergeometric Test:", wrap_style)
for branch in branches_sorted:
col += 1
# the previous row is 'row'
fstring = "HYPGEOMDIST(" + col_idx2str(col) + (row-3).__str__()
fstring += "," + col_idx2str(col) + (row-2).__str__()
fstring += "," + col_idx2str(col) + (row-1).__str__()
fstring += "," + col_idx2str(col) + (row).__str__()
fstring += ")"
sheet1.write(row,col, Formula(fstring), stats_style)
row += 1
col = 2
sheet1.write(row, col, "Sanity Check:", wrap_style)
for branch in branches_sorted:
col += 1
# the previous row is 'row'
fstring = "0 "
for ii in range(0, branch_countvalidsites[branch]+1):
fstring += "+ HYPGEOMDIST(" + ii.__str__()
fstring += "," + col_idx2str(col) + (row-3).__str__()
fstring += "," + col_idx2str(col) + (row-2).__str__()
fstring += "," + col_idx2str(col) + (row-1).__str__()
fstring += ")"
sheet1.write(row,col, Formula(fstring), stats_style)
row += 1
col = 2
sheet1.write(row, col, "<= Cummu. HG Test:", wrap_style)
for branch in branches_sorted:
col += 1
# the previous row is 'row'
fstring = "0 "
for ii in range(0, branch_counthits[branch]+1):
fstring += "+ HYPGEOMDIST(" + ii.__str__()
fstring += "," + col_idx2str(col) + (row-4).__str__()
fstring += "," + col_idx2str(col) + (row-3).__str__()
fstring += "," + col_idx2str(col) + (row-2).__str__()
fstring += ")"
sheet1.write(row,col, Formula(fstring), stats_style)
row += 1
col = 2
sheet1.write(row, col, ">= Cummu. HG Test:", wrap_style)
for branch in branches_sorted:
col += 1
# the previous row is 'row'
fstring = "0 "
for ii in range(branch_counthits[branch], branch_countvalidsites[branch]+1):
fstring += "+ HYPGEOMDIST(" + ii.__str__()
fstring += "," + col_idx2str(col) + (row-5).__str__()
fstring += "," + col_idx2str(col) + (row-4).__str__()
fstring += "," + col_idx2str(col) + (row-3).__str__()
fstring += ")"
sheet1.write(row,col, Formula(fstring), stats_style)
book.save("ASR-cBio.11-7-2014.xls")
exit()
|
vhsvhs/ratflic
|
runme.py
|
Python
|
gpl-2.0
| 15,185
|
"""
The legend module defines the Legend class, which is responsible for
drawing legends associated with axes and/or figures.
The Legend class can be considered as a container of legend handles
and legend texts. Creation of corresponding legend handles from the
plot elements in the axes or figures (e.g., lines, patches, etc.) are
specified by the handler map, which defines the mapping between the
plot elements and the legend handlers to be used (the default legend
handlers are defined in the :mod:`~matplotlib.legend_handler` module). Note
that not all kinds of artist are supported by the legend yet (See
:ref:`plotting-guide-legend` for more information).
"""
from __future__ import division, print_function
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist, allow_rasterization
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection, \
CircleCollection, PathCollection
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.transforms import BboxTransformTo, BboxTransformFrom
from matplotlib.offsetbox import HPacker, VPacker, TextArea, DrawingArea
from matplotlib.offsetbox import DraggableOffsetBox
from matplotlib.container import ErrorbarContainer, BarContainer, StemContainer
from matplotlib import MatplotlibDeprecationWarning as mplDeprecation
import legend_handler
class DraggableLegend(DraggableOffsetBox):
def __init__(self, legend, use_blit=False, update="loc"):
"""
update : If "loc", update *loc* parameter of
legend upon finalizing. If "bbox", update
*bbox_to_anchor* parameter.
"""
self.legend = legend
if update in ["loc", "bbox"]:
self._update = update
else:
raise ValueError("update parameter '%s' is not supported." %
update)
DraggableOffsetBox.__init__(self, legend, legend._legend_box,
use_blit=use_blit)
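    # Illustrative usage (added note): given an existing legend instance,
    # dragging support can be attached directly with, e.g.
    #   d = DraggableLegend(leg, use_blit=False, update="loc")
    # which is what the public Legend.draggable() helper wraps.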
def artist_picker(self, legend, evt):
return self.legend.contains(evt)
def finalize_offset(self):
loc_in_canvas = self.get_loc_in_canvas()
if self._update == "loc":
self._update_loc(loc_in_canvas)
elif self._update == "bbox":
self._update_bbox_to_anchor(loc_in_canvas)
else:
raise RuntimeError("update parameter '%s' is not supported." %
self.update)
def _update_loc(self, loc_in_canvas):
bbox = self.legend.get_bbox_to_anchor()
# if bbox has zero width or height, the transformation is
        # ill-defined. Fall back to the default bbox_to_anchor.
if bbox.width == 0 or bbox.height == 0:
self.legend.set_bbox_to_anchor(None)
bbox = self.legend.get_bbox_to_anchor()
_bbox_transform = BboxTransformFrom(bbox)
self.legend._loc = tuple(
_bbox_transform.transform_point(loc_in_canvas))
def _update_bbox_to_anchor(self, loc_in_canvas):
tr = self.legend.axes.transAxes
loc_in_bbox = tr.transform_point(loc_in_canvas)
self.legend.set_bbox_to_anchor(loc_in_bbox)
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
"""
codes = {'best': 0, # only implemented for axis legends
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
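    # Illustrative usage (added note): legends are normally created through
    # the Axes/pyplot API rather than by constructing this class directly,
    # e.g.
    #   line, = ax.plot(x, np.sin(x), label='sin(x)')
    #   ax.legend(loc='upper right', ncol=1, fancybox=True)
    # or anchored outside the axes with
    #   ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left')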
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc=None,
numpoints=None, # the number of points in the legend line
markerscale=None, # the relative size of legend markers
# vs. original
scatterpoints=3, # TODO: may be an rcParam
scatteryoffsets=None,
prop=None, # properties for the legend texts
fontsize=None, # keyword to set font size directly
# the following dimensions are in axes coords
pad=None, # deprecated; use borderpad
labelsep=None, # deprecated; use labelspacing
handlelen=None, # deprecated; use handlelength
handletextsep=None, # deprecated; use handletextpad
axespad=None, # deprecated; use borderaxespad
# spacing & pad defined as a fraction of the font-size
borderpad=None, # the whitespace inside the legend border
labelspacing=None, # the vertical space between the legend
# entries
handlelength=None, # the length of the legend handles
handleheight=None, # the height of the legend handles
handletextpad=None, # the pad between the legend handle
# and text
borderaxespad=None, # the pad between the axes and legend
# border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns.
# None, "expand"
fancybox=None, # True use a fancy box, false use a rounded
# box, none use rc
shadow=None,
title=None, # set a title for the legend
bbox_to_anchor=None, # bbox that the legend will be anchored.
bbox_transform=None, # transform for the bbox
frameon=None, # draw frame
handler_map=None,
):
"""
- *parent*: the artist that contains the legend
- *handles*: a list of artists (lines, patches) to be added to the
legend
- *labels*: a list of strings to label the legend
Optional keyword arguments:
================ ====================================================
Keyword Description
================ ====================================================
loc a location code
prop the font property
fontsize the font size (used only if prop is not specified)
markerscale the relative size of legend markers vs. original
numpoints the number of points in the legend for line
scatterpoints the number of points in the legend for scatter plot
scatteryoffsets a list of yoffsets for scatter symbols in legend
frameon if True, draw a frame around the legend.
If None, use rc
fancybox if True, draw a frame with a round fancybox.
If None, use rc
shadow if True, draw a shadow behind legend
ncol number of columns
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handleheight the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
title the legend title
bbox_to_anchor the bbox that the legend will be anchored.
bbox_transform the transform for the bbox. transAxes if None.
================ ====================================================
The pad and spacing parameters are measured in font-size units. E.g.,
a fontsize of 10 points and a handlelength=5 implies a handlelength of
50 points. Values from rcParams will be used if None.
Users can specify any arbitrary location for the legend using the
*bbox_to_anchor* keyword argument. bbox_to_anchor can be an instance
of BboxBase(or its derivatives) or a tuple of 2 or 4 floats.
See :meth:`set_bbox_to_anchor` for more detail.
The legend location can be specified by setting *loc* with a tuple of
2 floats, which is interpreted as the lower-left corner of the legend
in the normalized axes coordinate.
"""
# local import only to avoid circularity
from matplotlib.axes import Axes
from matplotlib.figure import Figure
Artist.__init__(self)
if prop is None:
if fontsize is not None:
self.prop = FontProperties(size=fontsize)
else:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self._fontsize = self.prop.get_size_in_points()
propnames = ["numpoints", "markerscale", "shadow", "columnspacing",
"scatterpoints", "handleheight"]
self.texts = []
self.legendHandles = []
self._legend_title_box = None
self._handler_map = handler_map
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend." + name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad": "borderpad",
"labelsep": "labelspacing",
"handlelen": "handlelength",
"handletextsep": "handletextpad",
"axespad": "borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height) / self._fontsize
for k, v in deprecated_kwds.iteritems():
# use deprecated value if not None and if their newer
# counter part is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
mplDeprecation)
setattr(self, v, localdict[k] * axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend." + v])
else:
setattr(self, v, localdict[v])
del localdict
handles = list(handles)
if len(handles) < 2:
ncol = 1
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be > 0; it was %d" % numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3. / 8., 4. / 8., 2.5 / 8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.scatterpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets,
reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent, Axes):
self.isaxes = True
self.set_axes(parent)
self.set_figure(parent.figure)
elif isinstance(parent, Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0, 'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back '
'on "best"; valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.iterkeys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back '
'on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.iterkeys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not '
'implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._mode = mode
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor=rcParams["axes.facecolor"],
edgecolor=rcParams["axes.edgecolor"],
mutation_scale=self._fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox:
self.legendPatch.set_boxstyle("round", pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square", pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = frameon
if frameon is None:
self._drawFrame = rcParams["legend.frameon"]
# init with null renderer
self._init_legend_box(handles, labels)
self._loc = loc
self.set_title(title)
self._last_fontsize_points = self._fontsize
self._draggable = None
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
if self.isaxes:
a.set_axes(self.axes)
a.set_transform(self.get_transform())
def _set_loc(self, loc):
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
self._loc_real = loc
if loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
#def findoffset(width, height, xdescent, ydescent):
# return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(_findoffset)
self._loc_real = loc
def _get_loc(self):
return self._loc_real
_loc = property(_get_loc, _set_loc)
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Helper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox + xdescent, oy + ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Helper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc) == 2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.get_bbox_to_anchor()
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox,
self.get_bbox_to_anchor(),
renderer)
return x + xdescent, y + ydescent
@allow_rasterization
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible():
return
renderer.open_group('legend')
fontsize = renderer.points_to_pixels(self._fontsize)
        # if mode == "expand", set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2 * (self.borderaxespad + self.borderpad) * fontsize
self._legend_box.set_width(self.get_bbox_to_anchor().width - pad)
# update the location and size of the legend. This needs to
# be done in any case to clip the figure right.
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self._drawFrame:
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self._fontsize
else:
return renderer.points_to_pixels(self._fontsize)
# _default_handler_map defines the default mapping between plot
# elements and the legend handlers.
_default_handler_map = {
StemContainer: legend_handler.HandlerStem(),
ErrorbarContainer: legend_handler.HandlerErrorbar(),
Line2D: legend_handler.HandlerLine2D(),
Patch: legend_handler.HandlerPatch(),
LineCollection: legend_handler.HandlerLineCollection(),
RegularPolyCollection: legend_handler.HandlerRegularPolyCollection(),
CircleCollection: legend_handler.HandlerCircleCollection(),
BarContainer: legend_handler.HandlerPatch(
update_func=legend_handler.update_from_first_child),
tuple: legend_handler.HandlerTuple(),
PathCollection: legend_handler.HandlerPathCollection()
}
# (get|set|update)_default_handler_maps are public interfaces to
    # modify the default handler map.
@classmethod
def get_default_handler_map(cls):
"""
A class method that returns the default handler map.
"""
return cls._default_handler_map
@classmethod
def set_default_handler_map(cls, handler_map):
"""
A class method to set the default handler map.
"""
cls._default_handler_map = handler_map
@classmethod
def update_default_handler_map(cls, handler_map):
"""
A class method to update the default handler map.
"""
cls._default_handler_map.update(handler_map)
def get_legend_handler_map(self):
"""
return the handler map.
"""
default_handler_map = self.get_default_handler_map()
if self._handler_map:
hm = default_handler_map.copy()
hm.update(self._handler_map)
return hm
else:
return default_handler_map
@staticmethod
def get_legend_handler(legend_handler_map, orig_handle):
"""
return a legend handler from *legend_handler_map* that
corresponds to *orig_handler*.
*legend_handler_map* should be a dictionary object (that is
returned by the get_legend_handler_map method).
It first checks if the *orig_handle* itself is a key in the
        *legend_handler_map* and returns the associated value.
Otherwise, it checks for each of the classes in its
method-resolution-order. If no matching key is found, it
returns None.
"""
legend_handler_keys = legend_handler_map.keys()
if orig_handle in legend_handler_keys:
handler = legend_handler_map[orig_handle]
else:
for handle_type in type(orig_handle).mro():
if handle_type in legend_handler_map:
handler = legend_handler_map[handle_type]
break
else:
handler = None
return handler
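    # Illustrative note (added): a Line2D handle that is not itself a key in
    # the handler map resolves through its MRO to the Line2D entry, so
    # get_legend_handler(Legend.get_default_handler_map(), some_line)
    # returns the shared HandlerLine2D instance.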
def _init_legend_box(self, handles, labels):
"""
Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self._fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
        handle_list = [] # the list of handle instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
descent = 0.35 * self._approx_text_height() * (self.handleheight - 0.7)
        # 0.35 and 0.7 are just heuristic numbers. this may need to be improved
height = self._approx_text_height() * self.handleheight - descent
# each handle needs to be drawn inside a box of (x, y, w, h) =
# (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (e.g., Collections), you need to
        # manually set their transform to self.get_transform().
legend_handler_map = self.get_legend_handler_map()
for orig_handle, lab in zip(handles, labels):
handler = self.get_legend_handler(legend_handler_map, orig_handle)
if handler is None:
warnings.warn(
"Legend does not support %s\nUse proxy artist "
"instead.\n\n"
"http://matplotlib.sourceforge.net/users/legend_guide.html#using-proxy-artist\n" %
(str(orig_handle),))
handle_list.append(None)
continue
textbox = TextArea(lab, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handlebox = DrawingArea(width=self.handlelength * fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handler(self, orig_handle,
#xdescent, ydescent, width, height,
fontsize,
handlebox)
handle_list.append(handle)
handleboxes.append(handlebox)
if len(handleboxes) > 0:
# We calculate number of rows in each column. The first
# (num_largecol) columns will have (nrows+1) rows, and remaining
# (num_smallcol) columns will have (nrows) rows.
ncol = min(self._ncol, len(handleboxes))
nrows, num_largecol = divmod(len(handleboxes), ncol)
num_smallcol = ncol - num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0,
num_largecol * (nrows + 1),
(nrows + 1)),
[nrows + 1] * num_largecol)
smallcol = safezip(range(num_largecol * (nrows + 1),
len(handleboxes),
nrows),
[nrows] * num_smallcol)
else:
largecol, smallcol = [], []
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol + smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad * fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0 + di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing * fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing * fontsize
self._legend_handle_box = HPacker(pad=0,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_title_box = TextArea("")
self._legend_box = VPacker(pad=self.borderpad * fontsize,
sep=self.labelspacing * fontsize,
align="center",
children=[self._legend_title_box,
self._legend_handle_box])
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
        Returns a three-element list.
        First element is a list of (x, y) vertices (in
        display-coordinates) covered by all the lines and line
        collections, in the legend's handles.
        Second element is a list of bounding boxes for all the patches in
        the legend's handles.
        Third element is a list of the transformed paths of the lines.
"""
# should always hold because function is only called internally
assert self.isaxes
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self.set_frame_on(b)
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
children.extend(self.get_lines())
children.extend(self.get_patches())
children.extend(self.get_texts())
children.append(self.get_frame())
if self._legend_title_box:
children.append(self.get_title())
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch',
[h for h in self.legendHandles
if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def set_title(self, title, prop=None):
"""
set the legend title. Fontproperties can be optionally set
with *prop* parameter.
"""
self._legend_title_box._text.set_text(title)
if prop is not None:
if isinstance(prop, dict):
prop = FontProperties(**prop)
self._legend_title_box._text.set_fontproperties(prop)
if title:
self._legend_title_box.set_visible(True)
else:
self._legend_title_box.set_visible(False)
def get_title(self):
'return Text instance for the legend title'
return self._legend_title_box._text
def get_window_extent(self, *args, **kwargs):
        'return the extent of the legend'
return self.legendPatch.get_window_extent(*args, **kwargs)
def get_frame_on(self):
"""
Get whether the legend box patch is drawn
"""
return self._drawFrame
def set_frame_on(self, b):
"""
Set whether the legend box patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._drawFrame = b
def get_bbox_to_anchor(self):
"""
return the bbox that the legend will be anchored
"""
if self._bbox_to_anchor is None:
return self.parent.bbox
else:
return self._bbox_to_anchor
def set_bbox_to_anchor(self, bbox, transform=None):
"""
set the bbox that the legend will be anchored.
*bbox* can be a BboxBase instance, a tuple of [left, bottom,
width, height] in the given transform (normalized axes
coordinate if None), or a tuple of [left, bottom] where the
width and height will be assumed to be zero.
"""
if bbox is None:
self._bbox_to_anchor = None
return
elif isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
if transform is None:
transform = BboxTransformTo(self.parent.bbox)
self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor,
transform)
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding
"best".
        - bbox: bbox to be placed, display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs = {UR: "NE",
UL: "NW",
LL: "SW",
LR: "SE",
R: "E",
CL: "W",
CR: "E",
LC: "S",
UC: "N",
C: "C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self._fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
# should always hold because function is only called internally
assert self.isaxes
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.get_bbox_to_anchor(),
renderer)
for x
in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
def contains(self, event):
return self.legendPatch.contains(event)
def draggable(self, state=None, use_blit=False, update="loc"):
"""
Set the draggable state -- if state is
* None : toggle the current state
* True : turn draggable on
* False : turn draggable off
If draggable is on, you can drag the legend on the canvas with
the mouse. The DraggableLegend helper instance is returned if
draggable is on.
        The update parameter controls which parameter of the legend changes
        when dragged. If update is "loc", the *loc* parameter of the legend
is changed. If "bbox", the *bbox_to_anchor* parameter is changed.
"""
is_draggable = self._draggable is not None
# if state is None we'll toggle
if state is None:
state = not is_draggable
if state:
if self._draggable is None:
self._draggable = DraggableLegend(self,
use_blit,
update=update)
else:
if self._draggable is not None:
self._draggable.disconnect()
self._draggable = None
return self._draggable
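# A minimal usage sketch for the legend API above, assuming the standard
# matplotlib.pyplot interface (not part of this module):
#
#     import matplotlib.pyplot as plt
#
#     fig, ax = plt.subplots()
#     ax.plot([0, 1, 2], [0, 1, 4], label="quadratic")
#     leg = ax.legend(loc="upper left")
#     leg.set_title("Curves")    # stored in the legend's _legend_title_box
#     leg.draggable(True)        # returns the DraggableLegend helper
#     plt.show()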
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/lib/matplotlib/legend.py
|
Python
|
mit
| 37,156
|
import os
import logging
import report
import problem
import problem.config
class DBusProxy(object):
__instance = None
def __init__(self, dbus):
self._proxy = None
self._iface = None
self.dbus = dbus
self.connected = False
self.connect()
def __new__(cls, *args, **kwargs):
if not cls.__instance:
cls.__instance = super(DBusProxy, cls).__new__(cls)
return cls.__instance
def connect(self):
self.connected = False
if self._proxy:
self._proxy.close()
try:
self._proxy = self.dbus.SystemBus().get_object(
'org.freedesktop.problems', '/org/freedesktop/problems')
except self.dbus.exceptions.DBusException as e:
logging.debug('Unable to get dbus proxy: {0}'.format(e))
return
try:
self._iface = self.dbus.Interface(self._proxy,
'org.freedesktop.problems')
except self.dbus.exceptions.DBusException as e:
logging.debug('Unable to get dbus interface: {0}'.format(e))
return
self.connected = True
def _dbus_call(self, fun_name, *args):
try:
logging.debug('Calling {0} with {1}'.format(fun_name, args))
return getattr(self._iface, fun_name)(*args)
except self.dbus.exceptions.DBusException as e:
dbname = e.get_dbus_name()
if dbname == "org.freedesktop.DBus.Error.ServiceUnknown":
self.connect()
return getattr(self._iface, fun_name)(*args)
if dbname == 'org.freedesktop.problems.AuthFailure':
raise problem.exception.AuthFailure(e)
if dbname == 'org.freedesktop.problems.InvalidProblemDir':
raise problem.exception.InvalidProblem(e)
raise
def get_item(self, dump_dir, name):
val = self._dbus_call('GetInfo', dump_dir, [name])
if name not in val:
return None
return str(val[name])
def set_item(self, dump_dir, name, value):
return self._dbus_call('SetElement', dump_dir, name, str(value))
def del_item(self, dump_dir, name):
return self._dbus_call('DeleteElement', dump_dir, name)
def create(self, problem_dict):
return self._dbus_call('NewProblem', problem_dict)
def delete(self, dump_dir):
return self._dbus_call('DeleteProblem', [dump_dir])
def list(self):
return [str(prob) for prob in self._dbus_call('GetProblems')]
def list_all(self):
return [str(prob) for prob in self._dbus_call('GetAllProblems')]
class SocketProxy(object):
def create(self, problem_dict):
import socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(5)
try:
sock.connect('/var/run/abrt/abrt.socket')
sock.sendall("PUT / HTTP/1.1\r\n\r\n")
for key, value in problem_dict.items():
sock.sendall('{0}={1}\0'.format(key.upper(), value))
sock.shutdown(socket.SHUT_WR)
resp = ''
while True:
buf = sock.recv(256)
if not buf:
break
resp += buf
return resp
except socket.timeout as exc:
logging.error('communication with daemon failed: {0}'.format(exc))
return None
def get_item(self, *args):
raise NotImplementedError
def set_item(self, *args):
raise NotImplementedError
def del_item(self, *args):
raise NotImplementedError
def delete(self, *args):
raise NotImplementedError
def list(self, *args):
raise NotImplementedError
def list_all(self, *args):
return self.list(*args)
def get_problem_watcher(self):
raise NotImplementedError
class FsProxy(object):
def __init__(self, directory=problem.config.DEFAULT_DUMP_LOCATION):
self.directory = directory
def create(self, problem_dict):
probd = report.problem_data()
for key, value in problem_dict.items():
probd.add(key, value)
ddir = probd.create_dump_dir(self.directory)
ret = ddir.name
ddir.close()
problem.notify_new_path(ret)
return ret
def _open_ddir(self, dump_dir, readonly=False):
flags = 0
if readonly:
flags |= report.DD_OPEN_READONLY
ddir = report.dd_opendir(dump_dir, flags)
if not ddir:
raise problem.exception.InvalidProblem(
'Can\'t open directory: {0}'.format(dump_dir))
return ddir
def get_item(self, dump_dir, name):
ddir = self._open_ddir(dump_dir, readonly=True)
flags = (report.DD_FAIL_QUIETLY_EACCES |
report.DD_FAIL_QUIETLY_ENOENT |
report.DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE)
val = ddir.load_text(name, flags).encode('utf-8', errors='ignore')
ddir.close()
return val
def set_item(self, dump_dir, name, value):
ddir = self._open_ddir(dump_dir)
ddir.save_text(name, str(value))
ddir.close()
def del_item(self, dump_dir, name):
ddir = self._open_ddir(dump_dir)
ddir.delete_item(name)
ddir.close()
def delete(self, dump_dir):
ddir = report.dd_opendir(dump_dir)
if not ddir:
return not os.path.isdir(dump_dir)
ddir.delete()
return True
def list(self, _all=False):
for dir_entry in os.listdir(self.directory):
dump_dir = os.path.join(self.directory, dir_entry)
if not os.path.isdir(dump_dir) or not os.access(dump_dir, os.R_OK):
continue
uid = os.getuid()
            gid = os.getgid()
dir_stat = os.stat(dump_dir)
if not _all and (dir_stat.st_uid != uid and
dir_stat.st_gid != gid):
continue
ddir = report.dd_opendir(dump_dir, report.DD_OPEN_READONLY)
if ddir:
ddir.close()
yield dump_dir
def list_all(self, *args, **kwargs):
kwargs.update(dict(_all=True))
return self.list(*args, **kwargs)
def get_proxy():
try:
import dbus
wrapper = DBusProxy(dbus)
if wrapper.connected:
return wrapper
except ImportError:
logging.debug('DBus not found')
return FsProxy()
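# Usage sketch for the proxies above; the problem-item keys shown are
# illustrative, not a definitive list of required fields:
#
#     proxy = get_proxy()          # DBusProxy when DBus is available, else FsProxy
#     ident = proxy.create({"reason": "example crash",
#                           "executable": "/usr/bin/example"})
#     for dump_dir in proxy.list():
#         print(dump_dir)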
|
airtimemedia/abrt
|
src/python-problem/problem/proxies.py
|
Python
|
gpl-2.0
| 6,562
|
from snakebite.client import Client
client = Client('localhost', 9000)
for f in client.copyToLocal(['/input/input.txt'], '/tmp'):
print f
|
MinerKasch/HadoopWithPython
|
python/HDFS/copy_to_local.py
|
Python
|
mit
| 141
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to set IAM policy for an instance resource."""
from googlecloudsdk.api_lib.compute import iam_base_classes
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SetIamPolicy(iam_base_classes.RegionalSetIamPolicy):
"""Set the IAM policy for a Google Compute Engine subnetwork resource."""
@staticmethod
def Args(parser):
iam_base_classes.RegionalSetIamPolicy.Args(parser, 'compute.subnetworks')
@property
def service(self):
return self.compute.subnetworks
@property
def resource_type(self):
return 'subnetworks'
SetIamPolicy.detailed_help = iam_base_classes.SetIamPolicyHelp('subnetwork')
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/compute/networks/subnets/set_iam_policy.py
|
Python
|
mit
| 1,265
|
import cs50
def main():
# Get inputs from user
print("Key: ", end="")
key = str.upper(cs50.get_string())
print("Text: ", end="")
text = str.upper(cs50.get_string())
    # Initialise cipher variable and key position
    cipher = ""
    j = 0
    for i in text:
        if i.isalpha():
            # Shift by the current key letter (A=0 ... Z=25), wrapping within A-Z
            c = ord(i) + (ord(key[j % len(key)]) - 65)
            while c > 90:
                c -= 26
            cipher += chr(c)
            j += 1
        else:
            cipher += i
print(cipher)
if __name__ == "__main__":
main()
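# Worked example for the loop above (key letters shift A=0 ... Z=25, cycling
# over alphabetic characters only):
#   Key:  ABC
#   Text: HELLO
#   Output: HFNLP   (H+0, E+1, L+2, L+0, O+1)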
|
Caleb-Ellis/CS50
|
pset6/vigenere.py
|
Python
|
mit
| 556
|
from __future__ import print_function, division
import warnings
def flip_sublat(opstr,indx,lat=0):
sign = 1
opstr = [str(s) for s in opstr]
for s,i,j in zip(opstr,indx,range(len(indx))):
if ((i % 2) == (lat % 2)):
if (s in ['z','y']):
sign *= -1
elif (s == "+"):
opstr[j] = '-'
elif (s == "-"):
opstr[j] = '+'
return sign,"".join(opstr)
def check_T(sort_opstr,operator_list,L,a):
missing_ops=[]
for i in range(0,L//a,1):
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
for j,ind in enumerate(indx):
indx[j] = (ind+i*a)%L
new_op = list(op)
new_op[1] = indx
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
def check_Z(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
z_count = opstr[:i].count("z")
y_count = opstr[:i].count("y")
if ((y_count + z_count) % 2) != 0:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_op[0][:i].replace("+","#").replace("-","+").replace("#","-") + op[0][i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
def check_P(sort_opstr,operator_list,L):
missing_ops = []
for op in operator_list:
indx = list(op[1])
for j,ind in enumerate(indx):
indx[j] = (L-1-ind) % L
new_op = list(op)
new_op[1] = indx
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
def check_PZ(sort_opstr,operator_list,L):
missing_ops = []
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
for j,ind in enumerate(indx):
indx[j] = (L-1-ind) % L
sign = (-1)**(opstr[:i].count('z')+opstr.count('y'))
new_op = list(op)
new_op[0] = new_op[0][:i].replace("+","#").replace("-","+").replace("#","-") + op[0][i:]
new_op[1] = indx
new_op[2] *= sign
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
def check_ZA(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
sign,new_opstr = flip_sublat(opstr[:i],indx[:i],lat=0)
if sign == -1:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_opstr + opstr[i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
def check_ZB(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
sign,new_opstr = flip_sublat(opstr[:i],indx[:i],lat=1)
if sign == -1:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_opstr + opstr[i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
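# Usage sketch; the operator format [opstr, indx, coupling] and the identity
# sort function below are assumptions made for illustration only:
#
#     identity = lambda op: op
#     ops = [["zz", [0, 1], 1.0], ["zz", [1, 2], 1.0], ["zz", [2, 0], 1.0]]
#     check_T(identity, ops, 3, 1)   # -> [] : the list is translation invariant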
|
weinbe58/QuSpin
|
quspin/basis/basis_1d/_check_1d_symm.py
|
Python
|
bsd-3-clause
| 3,277
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 15:10:34 2016
Script to plot the results of RoBoCoP and RockyCoastCRN
Martin Hurst,
March 7th 2016
@author: mhurst
"""
#import modules
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import rc
# Customise figure style #
rc('font',size=12)
rc('ytick.major',pad=5)
rc('xtick.major',pad=5)
#rc('font',**{'family':'serif','serif':['Helvetica']})
#rc('text', usetex=True)
padding = 5
#create blank figure
plt.figure(1,figsize=(16,9))
#First load the morphology through time
# declare the file and the axis
ProfileName = "../results/Kaikoura/Kaikoura_ShoreProfiles.xz"
f = open(ProfileName,'r')
MorphLines = f.readlines()
NoLines = len(MorphLines)
f.close()
dx = 0.1
dy = 0.1
EndTime = 10001
f = open("../results/Kaikoura/filelist.txt","w")
#Get figure extent
Line = MorphLines[5000].strip().split(" ")
#Read morphology
X = np.array(Line[1:],dtype="float64")
print X
XMin = np.min(X)
XMax = np.max(X)
print XMin, XMax
#Loop through time and make each plot
j=1
for Time in range(0,EndTime+1,10):
print Time, j
#Get header info and setup X coord
MorphLine = (MorphLines[j].strip().split(" "))
j += 1
#Read morphology
X = np.array(MorphLine[1:],dtype="float64")
Z = np.arange(10.,-10.01,-0.1)
HighTideInd = np.argmin(np.abs(0.5-Z))
SLTideInd = np.argmin(np.abs(0-Z))
LowTideInd = np.argmin(np.abs(-0.5-Z))
SkyFillX = np.concatenate((np.array([-10,1000,1000]),X, np.array([-10])))
SkyFillZ = np.concatenate((np.array([100,100,10]),Z, np.array([-100])))
plt.fill(SkyFillX,SkyFillZ,color=[0.9,0.95,1.])
WaterFillX = np.concatenate((np.array([-10]),X[HighTideInd:],np.array([-10])))
WaterFillZ = np.concatenate((np.array([Z[HighTideInd]]),Z[HighTideInd:],np.array([Z[-1]])))
plt.fill(WaterFillX,WaterFillZ,color=[0.7,0.85,1.])
plt.plot([-10,X[HighTideInd]],[0.5,0.5],'--',color=[0.4,0.6,0.8])
plt.plot([-10,X[SLTideInd]],[0.,0.],'-',color=[0.4,0.6,0.8])
plt.plot([-10,X[LowTideInd]],[-0.5,-0.5],'--',color=[0.4,0.6,0.8])
plt.plot(X,Z,'k-')
plt.plot([X[0],1000],[Z[0],Z[0]],'k-')
# tweak the plot
#ax1.set_xticklabels([])
plt.xlabel(r"Distance (m)")
plt.ylabel(r"Elevation (m)")
plt.xlim(XMin,XMax)
plt.ylim(-10,10)
#write the time on the plot
plt.text(-8,14,"Time in years: "+str(Time))
#save plot and mark earthquake
FigName = "../results/Kaikoura/Out"+str(Time)+".png"
f.write(FigName+"\n")
plt.savefig(FigName,dpi=100)
if (Time == 5000):
plt.text(10,9,'EARTHQUAKE')
for i in range(0,26):
FigName = "../results/Kaikoura/Out"+str(Time)+"_EQ"+str(i)+".png"
f.write(FigName+"\n")
plt.savefig(FigName,dpi=100)
plt.clf()
f.close()
|
mdhurst1/RoBoCoP_CRN
|
plotting_functions/plot_kaikoura_animation.py
|
Python
|
gpl-3.0
| 2,926
|
__author__ = 'Eden Thiago Ferreira'
import cProfile as cp
import pstats as pst
from collections import OrderedDict
from grafos import *
from banco_dados import *
from caminhos_minimos import *
class Coletor:
def __init__(self,grafo,nome_mapa=None,path_grafos=None):
"""se path_grafo é None ele gera o grafo aleatoriamente"""
self.nome_mapa = nome_mapa
if path_grafos is None:
self.path_grafos = {}
else:
self.path_grafos = path_grafos
self.grafo = grafo
if nome_mapa is None:
self.grafo.gerar_grafo(rnd.randint(1200,1600),rnd.randint(1200,1600),
rnd.uniform(800,1200),rnd.uniform(800,1200),rnd.uniform(0.02,0.1))
else:
self.grafo.gerar_grafo_mapa(path_grafos[nome_mapa],nome_mapa)
self.profiler = cp.Profile()
self.pt_ori, self.pt_dest = rnd.sample(self.grafo.pontos,2)
self.dij, self.ast = Dijkstra(self.grafo), AStar(self.grafo)
self.executou = False
def get_tempo(self):
with open('status.txt','w') as arq:
dados = pst.Stats(self.profiler,stream=arq)
dados.strip_dirs().print_stats('(start)')
stat = list()
with open('status.txt','r') as arq:
for li in arq.readlines():
if 'start' in li:
stat.append((li.split()))
return stat[1][4]
def exec_dij(self):
self.dij.pt_ori, self.dij.pt_dest = self.pt_ori, self.pt_dest
self.profiler.enable()
self.dij.start()
self.profiler.disable()
self.dij.tempo = self.get_tempo()
def exec_ast(self):
self.ast.pt_ori, self.ast.pt_dest = self.pt_ori, self.pt_dest
self.profiler.enable()
self.ast.start()
self.profiler.disable()
self.ast.tempo = self.get_tempo()
def start(self):
self.exec_dij()
self.exec_ast()
self.executou = True
def dump_dados(self,con):
if self.executou:
if self.nome_mapa is None:
con.inserir_grafo(self.grafo)
else:
con.inserir_grafo_mapa(self.grafo)
dados_dij = OrderedDict()
dados_dij['ponto_origem'] = self.dij.pt_ori
dados_dij['ponto_destino'] = self.dij.pt_dest
dados_dij['caminho'] = str(self.dij.caminho)
dados_dij['num_passos'] = self.dij.num_passos
dados_dij['distancia_total'] = self.dij.dist_total
dados_dij['tempo'] = self.dij.tempo
dados_ast = OrderedDict()
dados_ast['ponto_origem'] = self.ast.pt_ori
dados_ast['ponto_destino'] = self.ast.pt_dest
dados_ast['caminho'] = str(self.ast.caminho)
dados_ast['num_passos'] = self.ast.num_passos
dados_ast['distancia_total'] = self.ast.dist_total
dados_ast['tempo'] = self.ast.tempo
con.inserir_dijkstra((dados_dij['ponto_origem'],dados_dij['ponto_destino'],
dados_dij['caminho'],dados_dij['num_passos'],
dados_dij['distancia_total'],dados_dij['tempo'],self.grafo.ident))
con.inserir_astar((dados_ast['ponto_origem'],dados_ast['ponto_destino'],
dados_ast['caminho'],dados_ast['num_passos'],
dados_ast['distancia_total'],dados_ast['tempo'],self.grafo.ident))
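# Hypothetical usage sketch; the Grafo and BancoDados names are assumed from
# the starred imports above and may differ in the real modules:
#
#     coletor = Coletor(Grafo())      # random graph when nome_mapa is None
#     coletor.start()                 # runs Dijkstra and A* on the same point pair
#     con = BancoDados()              # any object exposing the inserir_* methods
#     coletor.dump_dados(con)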
|
edenferreira/Grafo-e-Caminhos-Minimos
|
coletar_dados.py
|
Python
|
bsd-2-clause
| 3,475
|
""" Base class for UserStore """
from abc import ABC, abstractmethod
import traceback
from logging import getLogger
from datetime import datetime
from pytz import timezone as tz
from ..serializer import dumps, loads
from ..models import User
class UserStore(ABC):
"""
Base class for UserStore to read/write user information
Attributes
----------
config : minette.Config
Configuration
timezone : pytz.timezone
Timezone
logger : logging.Logger
Logger
table_name : str
Database table name for read/write user data
sqls : dict
        SQLs used in UserStore
"""
def __init__(self, config=None, timezone=None, logger=None,
table_name="user", **kwargs):
"""
Parameters
----------
config : minette.Config, default None
Configuration
timezone : pytz.timezone, default None
Timezone
logger : logging.Logger, default None
Logger
table_name : str, default "user"
Database table name for read/write user data
"""
self.config = config
self.timezone = timezone or (
tz(config.get("timezone", default="UTC")) if config else tz("UTC"))
self.logger = logger if logger else getLogger(__name__)
self.table_name = table_name
self.sqls = self.get_sqls()
@abstractmethod
def get_sqls(self):
pass
def prepare_table(self, connection, prepare_params=None):
"""
Check and create table if not exist
Parameters
----------
connection : Connection
Connection for prepare
        prepare_params : tuple, default None
            Query parameters for checking the table
"""
cursor = connection.cursor()
cursor.execute(self.sqls["prepare_check"], prepare_params or tuple())
if not cursor.fetchone():
cursor.execute(self.sqls["prepare_create"])
connection.commit()
return True
else:
return False
def get(self, channel, channel_user_id, connection):
"""
Get user by channel and channel_user_id
Parameters
----------
channel : str
Channel
channel_user_id : str
Channel user ID
connection : Connection
Connection
Returns
-------
user : minette.User
User
"""
user = User(channel=channel, channel_user_id=channel_user_id)
if not channel_user_id:
return user
try:
cursor = connection.cursor()
cursor.execute(self.sqls["get_user"], (channel, channel_user_id))
row = cursor.fetchone()
if row is not None:
# convert to dict
if isinstance(row, dict):
record = row
else:
record = dict(
zip([column[0] for column in cursor.description], row))
# convert type
record["data"] = loads(record["data"])
# restore user
user.id = record["user_id"]
user.name = record["name"]
user.nickname = record["nickname"]
user.profile_image_url = record["profile_image_url"]
user.data = record["data"] if record["data"] else {}
else:
cursor.execute(self.sqls["add_user"], (
channel, channel_user_id, user.id,
datetime.now(self.timezone), user.name, user.nickname,
user.profile_image_url, None)
)
connection.commit()
except Exception as ex:
self.logger.error(
"Error occured in restoring user from database: "
+ str(ex) + "\n" + traceback.format_exc())
return user
def save(self, user, connection):
"""
Save user
Parameters
----------
user : minette.User
User to save
connection : Connection
Connection
"""
user_dict = user.to_dict()
serialized_data = dumps(user_dict["data"])
cursor = connection.cursor()
cursor.execute(self.sqls["save_user"], (
datetime.now(self.timezone), user.name, user.nickname,
user.profile_image_url, serialized_data, user.channel,
user.channel_user_id)
)
connection.commit()
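# A minimal concrete store as an illustration (SQLite-flavored SQL; the SQL
# shipped with minette's real stores may differ). The dict keys match those
# used by the base class above:
#
#     class SQLiteUserStore(UserStore):
#         def get_sqls(self):
#             return {
#                 "prepare_check": "select * from sqlite_master "
#                                  "where type='table' and name='{0}'".format(self.table_name),
#                 "prepare_create": "create table {0} (channel TEXT, channel_user_id TEXT, "
#                                   "user_id TEXT, timestamp TEXT, name TEXT, nickname TEXT, "
#                                   "profile_image_url TEXT, data TEXT)".format(self.table_name),
#                 "get_user": "select * from {0} where channel=? and channel_user_id=? "
#                             "limit 1".format(self.table_name),
#                 "add_user": "insert into {0} (channel, channel_user_id, user_id, timestamp, "
#                             "name, nickname, profile_image_url, data) "
#                             "values (?,?,?,?,?,?,?,?)".format(self.table_name),
#                 "save_user": "update {0} set timestamp=?, name=?, nickname=?, "
#                              "profile_image_url=?, data=? "
#                              "where channel=? and channel_user_id=?".format(self.table_name),
#             }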
|
uezo/minette-python
|
minette/datastore/userstore.py
|
Python
|
apache-2.0
| 4,577
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import logging
import logging.config
import celery
from configobj import ConfigObj, flatten_errors
from validate import Validator
from flask import current_app
import os
import sys
def configure_logger(app):
"""
initialize logging
"""
if 'LOGGER' in app.config:
logging.config.dictConfig(app.config['LOGGER'])
else: # Default is std out
handler = logging.StreamHandler(stream=sys.stdout)
app.logger.addHandler(handler)
app.logger.setLevel('INFO')
def make_celery(app):
celery_app = celery.Celery(app.import_name,
broker=app.config['CELERY_BROKER_URL'])
celery_app.conf.update(app.config)
TaskBase = celery_app.Task
class ContextTask(TaskBase):
abstract = True
def __init__(self):
pass
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery_app.Task = ContextTask
return celery_app
class InstanceConfig(object):
def __init__(self):
self.source_directory = None
self.backup_directory = None
self.target_file = None
self.exchange = None
self.synonyms_file = None
self.aliases_file = None
self.pg_host = None
self.pg_dbname = None
self.pg_username = None
self.pg_password = None
self.pg_port = None
self.name = None
def load_instance_config(instance_name):
confspec = []
confspec.append('[instance]')
confspec.append('source-directory = string()')
confspec.append('backup-directory = string()')
confspec.append('target-file = string()')
confspec.append('exchange = string(default="navitia")')
confspec.append('synonyms_file = string(default="")')
confspec.append('aliases_file = string(default="")')
confspec.append('name = string()')
confspec.append('is-free = boolean(default=False)')
confspec.append('[database]')
confspec.append('host = string()')
confspec.append('dbname = string()')
confspec.append('username = string()')
confspec.append('password = string()')
confspec.append('port = string(default="5432")')
ini_file = '%s/%s.ini' % \
(current_app.config['INSTANCES_DIR'], instance_name)
if not os.path.isfile(ini_file):
raise ValueError("File doesn't exists or is not a file %s" % ini_file)
config = ConfigObj(ini_file, configspec=confspec, stringify=True)
val = Validator()
res = config.validate(val, preserve_errors=True)
    # validate returns True, or a dict of errors ...
if type(res) is dict:
error = build_error(config, res)
raise ValueError("Config is not valid: %s in %s"
% (error, ini_file))
instance = InstanceConfig()
instance.source_directory = config['instance']['source-directory']
instance.backup_directory = config['instance']['backup-directory']
instance.target_file = config['instance']['target-file']
instance.exchange = config['instance']['exchange']
instance.synonyms_file = config['instance']['synonyms_file']
instance.aliases_file = config['instance']['aliases_file']
instance.name = config['instance']['name']
instance.is_free = config['instance']['is-free']
instance.pg_host = config['database']['host']
instance.pg_dbname = config['database']['dbname']
instance.pg_username = config['database']['username']
instance.pg_password = config['database']['password']
instance.pg_port = config['database']['port']
return instance
def build_error(config, validate_result):
"""
    build the error string if the config is not valid
"""
result = ""
for entry in flatten_errors(config, validate_result):
# each entry is a tuple
section_list, key, error = entry
if key is not None:
section_list.append(key)
else:
section_list.append('[missing section]')
section_string = ', '.join(section_list)
if error is False:
error = 'Missing value or section.'
result += section_string + ' => ' + str(error) + "\n"
return result
def get_instance_logger(instance):
"""
return the logger for this instance
get the logger name 'instance' as the parent logger
For file handler, all log will be in a file specific to this instance
"""
logger = logging.getLogger('instance.{0}'.format(instance.name))
# if the logger has already been inited, we can stop
if logger.handlers:
return logger
for handler in logger.parent.handlers:
#trick for FileHandler, we change the file name
if isinstance(handler, logging.FileHandler):
#we use the %(name) notation to use the same grammar as the python module
log_filename = handler.stream.name.replace('%(name)', instance.name)
new_handler = logging.FileHandler(log_filename)
new_handler.setFormatter(handler.formatter)
handler = new_handler
logger.addHandler(handler)
logger.propagate = False
return logger
def get_named_arg(arg_name, func, args, kwargs):
if kwargs and arg_name in kwargs:
return kwargs[arg_name]
else:
idx = func.func_code.co_varnames.index(arg_name)
if args and idx < len(args):
return args[idx]
else:
return None
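# Example <INSTANCES_DIR>/<instance_name>.ini matching the confspec in
# load_instance_config above (values are placeholders):
#
#     [instance]
#     source-directory = /srv/ed/source
#     backup-directory = /srv/ed/backup
#     target-file = /srv/ed/data.nav.lz4
#     exchange = navitia
#     name = default
#     is-free = False
#
#     [database]
#     host = localhost
#     dbname = ed
#     username = ed
#     password = secret
#     port = 5432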
|
ballouche/navitia
|
source/tyr/tyr/helper.py
|
Python
|
agpl-3.0
| 6,649
|
# Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "140_ShadowFoxPart2"
# NPCs
KLUCK = 30895
XENOVIA = 30912
# ITEMs
CRYSTAL = 10347
OXYDE = 10348
CRYPT = 10349
# MONSTERs
NPC=[20789,20790,20791,20792]
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [CRYSTAL,OXYDE,CRYPT]
def onEvent (self,event,st) :
htmltext = event
id = st.getState()
cond = st.getInt("cond")
if event == "30895-02.htm" :
st.set("cond","1")
st.playSound("ItemSound.quest_accept")
elif event == "30895-05.htm" :
st.set("cond","2")
st.playSound("ItemSound.quest_middle")
elif event == "30895-09.htm" :
st.playSound("ItemSound.quest_finish")
st.unset("talk")
st.exitQuest(False)
st.giveItems(57, 18775)
if st.getPlayer().getLevel() >= 37 and st.getPlayer().getLevel() <= 42:
st.addExpAndSp(30000,2000)
st.setState(COMPLETED)
elif event == "30912-07.htm" :
st.set("cond","3")
st.playSound("ItemSound.quest_middle")
elif event == "30912-09.htm" :
st.takeItems(CRYSTAL, 5)
if st.getRandom(100) <= 60 :
st.giveItems(OXYDE,1)
if st.getQuestItemsCount(OXYDE) >= 3 :
htmltext = "30912-09b.htm"
st.set("cond","4")
st.playSound("ItemSound.quest_middle")
st.takeItems(CRYSTAL, -1)
st.takeItems(OXYDE, -1)
st.giveItems(CRYPT,1)
else:
htmltext = "30912-09a.htm"
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
cond = st.getInt("cond")
if id == CREATED : return htmltext
if id == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif npcId == KLUCK :
if cond == 0 :
if player.getLevel() >= 37:
htmltext = "30895-01.htm"
else:
htmltext = "30895-00.htm"
elif cond == 1 :
htmltext = "30895-02.htm"
elif cond in [2,3] :
htmltext = "30895-06.htm"
elif cond == 4 :
if st.getInt("talk"):
htmltext = "30895-08.htm"
else:
htmltext = "30895-07.htm"
st.takeItems(CRYPT, -1)
st.set("talk","1")
elif npcId == XENOVIA :
if cond == 2 :
htmltext = "30912-01.htm"
elif cond == 3 :
if st.getQuestItemsCount(CRYSTAL) >= 5 :
htmltext = "30912-08.htm"
else:
htmltext = "30912-07.htm"
elif cond == 4 :
htmltext = "30912-10.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
if st.getInt("cond")==3 and st.getRandom(100) <= 80 :
st.playSound("ItemSound.quest_itemget")
st.giveItems(CRYSTAL,1)
return
def onFirstTalk (self,npc,player):
st = player.getQuestState(qn)
if not st :
st = self.newQuestState(player)
qs = player.getQuestState("139_ShadowFoxPart1")
if qs :
if qs.getState().getName() == 'Completed' and st.getState() == CREATED :
st.setState(STARTED)
npc.showChatWindow(player)
return
QUEST = Quest(140,qn,"Shadow Fox - 2")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addFirstTalkId(KLUCK) # this quest doesn't have a starter NPC; it appears in the quest list only after quest 139 is completed
QUEST.addTalkId(KLUCK)
QUEST.addTalkId(XENOVIA)
for mob in NPC :
QUEST.addKillId(mob)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/140_ShadowFoxPart2/__init__.py
|
Python
|
gpl-3.0
| 4,120
|
"""
Etymology plugin
Authors:
- GhettoWizard
- Scaevolus
- linuxdaemon <linuxdaemon@snoonet.org>
"""
import re
import requests
from bs4 import BeautifulSoup
from requests import HTTPError
from cloudbot import hook
@hook.command("e", "etymology")
def etymology(text, reply):
"""<word> - retrieves the etymology of <word>
:type text: str
"""
response = requests.get('https://www.etymonline.com/search', params={"q": text})
try:
response.raise_for_status()
except HTTPError as e:
reply("Error reaching etymonline.com: {}".format(e.response.status_code))
raise
soup = BeautifulSoup(response.text, "lxml")
result = soup.find('a', class_=re.compile("word--.+"))
if not result:
return 'No etymology found'
title = result.div.p.text.strip()
etym = result.div.section.text.strip()
url = result['href']
# Strip ellipsis
if etym.endswith(" …"):
etym = etym[:-2]
out = '[h1]{}:[/h1] {}'.format(title, etym)
if len(out) > 400:
out = out[:out.rfind(' ', 0, 400)] + ' ...'
return "{} [div] [h3]http://www.etymonline.com{}[/h3]".format(out, url)
|
valesi/CloudBot
|
plugins/etymology.py
|
Python
|
gpl-3.0
| 1,175
|
#!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_vscan_scanner_pool
short_description: NetApp ONTAP Vscan Scanner Pools Configuration.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Configure a Vscan Scanner Pool
options:
state:
description:
- Whether a Vscan Scanner pool is present or not
choices: ['present', 'absent']
default: present
vserver:
description:
- the name of the data vserver to use.
required: true
hostnames:
description:
- List of hostnames of Vscan servers which are allowed to connect to Data ONTAP
privileged_users:
description:
- List of privileged usernames. Username must be in the form "domain-name\\user-name"
scanner_pool:
description:
- the name of the virus scanner pool
required: true
scanner_policy:
description:
- The name of the Virus scanner Policy
choices: ['primary', 'secondary', 'idle']
'''
EXAMPLES = """
- name: Create and enable Scanner pool
na_ontap_vscan_scanner_pool:
state: present
username: '{{ netapp_username }}'
password: '{{ netapp_password }}'
hostname: '{{ netapp_hostname }}'
vserver: carchi-vsim2
hostnames: ['name', 'name2']
privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi']
scanner_pool: Scanner1
scanner_policy: primary
- name: Delete a scanner pool
na_ontap_vscan_scanner_pool:
state: absent
username: '{{ netapp_username }}'
password: '{{ netapp_password }}'
hostname: '{{ netapp_hostname }}'
vserver: carchi-vsim2
scanner_pool: Scanner1
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVscanScannerPool(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
            hostnames=dict(required=False, type='list'),
privileged_users=dict(required=False, type='list'),
scanner_pool=dict(required=True, type='str'),
scanner_policy=dict(required=False, choices=['primary', 'secondary', 'idle'])
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
parameters = self.module.params
self.hostnames = parameters['hostnames']
self.vserver = parameters['vserver']
self.privileged_users = parameters['privileged_users']
self.scanner_pool = parameters['scanner_pool']
self.state = parameters['state']
self.scanner_policy = parameters['scanner_policy']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
def create_scanner_pool(self):
"""
Create a Vscan Scanner Pool
:return: nothing
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-create')
if self.hostnames:
string_obj = netapp_utils.zapi.NaElement('hostnames')
scanner_pool_obj.add_child_elem(string_obj)
for hostname in self.hostnames:
string_obj.add_new_child('string', hostname)
if self.privileged_users:
users_obj = netapp_utils.zapi.NaElement('privileged-users')
scanner_pool_obj.add_child_elem(users_obj)
for user in self.privileged_users:
users_obj.add_new_child('privileged-user', user)
scanner_pool_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def apply_policy(self):
"""
Apply a Scanner policy to a Scanner pool
:return: nothing
"""
apply_policy_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-apply-policy')
apply_policy_obj.add_new_child('scanner-policy', self.scanner_policy)
apply_policy_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(apply_policy_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error applying policy %s to pool %s: %s' %
(self.scanner_policy, self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def get_scanner_pool(self):
"""
Check to see if a scanner pool exist or not
:return: True if it exist, False if it does not
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-get-iter')
        scanner_pool_info = netapp_utils.zapi.NaElement('vscan-scanner-pool-info')
scanner_pool_info.add_new_child('scanner-pool', self.scanner_pool)
scanner_pool_info.add_new_child('vserver', self.vserver)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(scanner_pool_info)
scanner_pool_obj.add_child_elem(query)
try:
result = self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error searching for Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
if result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info').get_child_content(
'scanner-pool') == self.scanner_pool:
return result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info')
return False
return False
def delete_scanner_pool(self):
"""
Delete a Scanner pool
:return: nothing
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-delete')
scanner_pool_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def asup_log_for_cserver(self, event_name):
"""
Fetch admin vserver for the given cluster
Create and Autosupport log event with the given module name
:param event_name: Name of the event log
:return: None
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event(event_name, cserver)
def apply(self):
self.asup_log_for_cserver("na_ontap_vscan_scanner_pool")
changed = False
scanner_pool_obj = self.get_scanner_pool()
if self.state == 'present':
if not scanner_pool_obj:
self.create_scanner_pool()
if self.scanner_policy:
self.apply_policy()
changed = True
# apply Scanner policy
if scanner_pool_obj:
if self.scanner_policy:
if scanner_pool_obj.get_child_content('scanner-policy') != self.scanner_policy:
self.apply_policy()
changed = True
if self.state == 'absent':
if scanner_pool_obj:
self.delete_scanner_pool()
changed = True
self.module.exit_json(changed=changed)
def main():
"""
Execute action from playbook
"""
command = NetAppOntapVscanScannerPool()
command.apply()
if __name__ == '__main__':
main()
|
thaim/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_vscan_scanner_pool.py
|
Python
|
mit
| 9,108
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-01-24 03:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('product', '0004_overview_slideproduct_techspec'),
]
operations = [
migrations.RemoveField(
model_name='overview',
name='product',
),
migrations.AddField(
model_name='overview',
name='category',
field=models.ForeignKey(blank=True, default='', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='overcat', to='product.Category', verbose_name='Category'),
),
]
|
skylifewww/pangolin-fog
|
product/migrations/0005_auto_20170124_0341.py
|
Python
|
mit
| 741
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.StackedRNNCells')
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Arguments:
cells: List of RNN cell instances.
Examples:
```python
cells = [
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
]
inputs = keras.Input((timesteps, input_dim))
x = keras.layers.RNN(cells)(inputs)
```
"""
def __init__(self, cells, **kwargs):
for cell in cells:
if not hasattr(cell, 'call'):
raise ValueError('All cells must have a `call` method. '
'received cells:', cells)
if not hasattr(cell, 'state_size'):
raise ValueError('All cells must have a '
'`state_size` attribute. '
'received cells:', cells)
self.cells = cells
# reverse_state_order determines whether the state size will be in a reverse
# order of the cells' state. User might want to set this to True to keep the
    # existing behavior. This is only useful when using RNN(return_state=True)
# since the state will be returned as the same order of state_size.
self.reverse_state_order = kwargs.pop('reverse_state_order', False)
if self.reverse_state_order:
logging.warning('reverse_state_order=True in StackedRNNCells will soon '
'be deprecated. Please update the code to work with the '
                      'natural order of states if you rely on the RNN states, '
'eg RNN(return_state=True).')
super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
# States are a flat list of the individual cell state size.
# e.g. states of a 2-layer LSTM would be `[h1, c1, h2, c2]`.
# (assuming one LSTM has states [h, c])
# In the case of reverse_state_order=True, the state_size will be
# [h2, c2, h1, c1].
state_size = []
for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
if _is_multiple_state(cell.state_size):
state_size += list(cell.state_size)
else:
state_size.append(cell.state_size)
return tuple(state_size)
@property
def output_size(self):
if getattr(self.cells[-1], 'output_size', None) is not None:
return self.cells[-1].output_size
elif _is_multiple_state(self.cells[-1].state_size):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
# The init state is flattened into a list because state_size is a flattened
# list.
initial_states = []
for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
get_initial_state_fn = getattr(cell, 'get_initial_state', None)
if get_initial_state_fn:
initial_states.append(get_initial_state_fn(
inputs=inputs, batch_size=batch_size, dtype=dtype))
else:
initial_states.append(_generate_zero_filled_state_for_cell(
cell, inputs, batch_size, dtype))
return nest.flatten(initial_states)
def call(self, inputs, states, constants=None, **kwargs):
# Recover per-cell states.
nested_states = []
for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
if _is_multiple_state(cell.state_size):
nested_states.append(states[:len(cell.state_size)])
states = states[len(cell.state_size):]
else:
nested_states.append([states[0]])
states = states[1:]
if self.reverse_state_order:
nested_states = nested_states[::-1]
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
if generic_utils.has_arg(cell.call, 'constants'):
inputs, states = cell.call(inputs, states, constants=constants,
**kwargs)
else:
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
# Format the new states as a flat list
new_states = []
if self.reverse_state_order:
new_nested_states = new_nested_states[::-1]
for cell_states in new_nested_states:
new_states += cell_states
return inputs, new_states
@tf_utils.shape_type_conversion
def build(self, input_shape):
if isinstance(input_shape, list):
constants_shape = input_shape[1:]
input_shape = input_shape[0]
for cell in self.cells:
if isinstance(cell, Layer):
if generic_utils.has_arg(cell.call, 'constants'):
cell.build([input_shape] + constants_shape)
else:
cell.build(input_shape)
if getattr(cell, 'output_size', None) is not None:
output_dim = cell.output_size
elif _is_multiple_state(cell.state_size):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = tuple([input_shape[0]] +
tensor_shape.as_shape(output_dim).as_list())
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({
'class_name': cell.__class__.__name__,
'config': cell.get_config()
})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cells = []
for cell_config in config.pop('cells'):
cells.append(
deserialize_layer(cell_config, custom_objects=custom_objects))
return cls(cells, **config)
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.non_trainable_weights
if not self.trainable:
trainable_weights = []
for cell in self.cells:
if isinstance(cell, Layer):
trainable_weights += cell.trainable_weights
return trainable_weights + weights
return weights
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.weights
return K.batch_get_value(weights)
def set_weights(self, weights):
"""Sets the weights of the model.
Arguments:
weights: A list of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
tuples = []
for cell in self.cells:
if isinstance(cell, Layer):
num_param = len(cell.weights)
        cell_weights = weights[:num_param]
        for sw, w in zip(cell.weights, cell_weights):
tuples.append((sw, w))
weights = weights[num_param:]
K.batch_set_value(tuples)
@property
def losses(self):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
losses += cell.losses
return losses + self._losses
@property
def updates(self):
updates = []
for cell in self.cells:
if isinstance(cell, Layer):
updates += cell.updates
return updates + self._updates
@tf_export('keras.layers.RNN')
class RNN(Layer):
"""Base class for recurrent layers.
Arguments:
cell: A RNN cell instance or a list of RNN cell instances.
A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is the size of the recurrent
state. This can also be a list/tuple of integers (one size per
state).
The `state_size` can also be TensorShape or tuple/list of
TensorShape, to represent high dimension state.
- a `output_size` attribute. This can be a single integer or a
TensorShape, which represent the shape of the output. For backward
compatible reason, if this attribute is not available for the
cell, the value will be inferred by the first element of the
`state_size`.
- a `get_initial_state(inputs=None, batch_size=None, dtype=None)`
method that creates a tensor meant to be fed to `call()` as the
initial state, if user didn't specify any initial state via other
means. The returned initial state should be in shape of
[batch, cell.state_size]. Cell might choose to create zero filled
tensor, or with other values based on the cell implementations.
`inputs` is the input tensor to the RNN layer, which should
contain the batch size as its shape[0], and also dtype. Note that
the shape[0] might be None during the graph construction. Either
        the `inputs` or the pair of `batch` and `dtype` are provided.
`batch` is a scalar tensor that represent the batch size
of the input. `dtype` is `tf.dtype` that represent the dtype of
the input.
For backward compatible reason, if this method is not implemented
by the cell, RNN layer will create a zero filled tensors with the
size of [batch, cell.state_size].
In the case that `cell` is a list of RNN cell instances, the cells
will be stacked on after the other in the RNN, implementing an
efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer or tuple of integers).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument).
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Input shape:
N-D tensor with shape `(batch_size, timesteps, ...)` or
`(timesteps, batch_size, ...)` when time_major is True.
Output shape:
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, state_size)`, where `state_size` could
be a high dimension tensor shape.
- if `return_sequences`: N-D tensor with shape
`(batch_size, timesteps, output_size)`, where `output_size` could
be a high dimension tensor shape, or
`(timesteps, batch_size, output_size)` when `time_major` is True.
- else, N-D tensor with shape `(batch_size, output_size)`, where
`output_size` could be a high dimension tensor shape.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing:
  if using a Sequential model,
  `batch_input_shape=(...)` to the first layer in your model;
  else, for a functional model with 1 or more Input layers,
  `batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
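For illustration, a minimal sketch of a stateful setup (assuming `keras`
refers to `tf.keras`; batch size, timesteps and feature sizes are
arbitrary):
```python
cell = keras.layers.SimpleRNNCell(32)
model = keras.Sequential([
    RNN(cell, stateful=True, batch_input_shape=(8, 10, 16)),
    keras.layers.Dense(1),
])
# Train with shuffle=False so successive batches line up sample-wise,
# and reset states between independent sequences:
# model.fit(x, y, batch_size=8, shuffle=False)
# model.reset_states()
```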
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
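For example, a functional-API sketch of passing a symbolic initial state
(assuming `keras` refers to `tf.keras`; sizes are arbitrary):
```python
inputs = keras.Input(shape=(None, 16))
init_state = keras.Input(shape=(32,))
# The initial state must match the cell's state_size (here, 32).
outputs = RNN(keras.layers.SimpleRNNCell(32))(
    inputs, initial_state=[init_state])
model = keras.Model([inputs, init_state], outputs)
```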
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
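A sketch of the call site, assuming `cell` is a user-defined cell whose
`call` signature is `call(inputs, states, constants=None)` (shapes are
arbitrary):
```python
x = keras.Input((None, 5))
c = keras.Input((3,))
layer = RNN(cell)
# The constant `c` is passed unchanged to the cell at every timestep.
y = layer(x, constants=[c])
```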
Examples:
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
**kwargs):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if not hasattr(cell, 'call'):
raise ValueError('`cell` should have a `call` method. '
'The RNN was passed:', cell)
if not hasattr(cell, 'state_size'):
raise ValueError('The RNN cell should have '
'an attribute `state_size` '
'(tuple of integers, '
'one integer per RNN state).')
super(RNN, self).__init__(**kwargs)
self.cell = cell
if isinstance(cell, checkpointable.CheckpointableBase):
self._track_checkpointable(self.cell, name='cell')
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.time_major = time_major
self.supports_masking = True
# The input shape is not known yet; it could have nested tensor inputs, and
# the input spec will be the list of specs for the flattened inputs.
self.input_spec = None
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = None
self._num_inputs = None
@property
def states(self):
if self._states is None:
state = nest.map_structure(lambda _: None, self.cell.state_size)
return state if nest.is_sequence(self.cell.state_size) else [state]
return self._states
@states.setter
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3), which comes from
# numpy inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
input_shape = nest.flatten(input_shape)[0]
batch = input_shape[0]
time_step = input_shape[1]
if self.time_major:
batch, time_step = time_step, batch
if _is_multiple_state(self.cell.state_size):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
def _get_output_shape(flat_output_size):
output_dim = tensor_shape.as_shape(flat_output_size).as_list()
if self.return_sequences:
if self.time_major:
output_shape = tensor_shape.as_shape([time_step, batch] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch, time_step] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch] + output_dim)
return output_shape
if getattr(self.cell, 'output_size', None) is not None:
# cell.output_size could be a nested structure.
output_shape = nest.flatten(nest.map_structure(
_get_output_shape, self.cell.output_size))
output_shape = output_shape[0] if len(output_shape) == 1 else output_shape
else:
# Note that state_size[0] could be a tensor_shape or int.
output_shape = _get_output_shape(state_size[0])
if self.return_state:
def _get_state_shape(flat_state):
state_shape = [batch] + tensor_shape.as_shape(flat_state).as_list()
return tensor_shape.as_shape(state_shape)
state_shape = nest.map_structure(_get_state_shape, state_size)
return generic_utils.to_list(output_shape) + nest.flatten(state_shape)
else:
return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
constants_shape = nest.map_structure(
lambda s: tuple(tensor_shape.TensorShape(s).as_list()),
constants_shape)
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
# The input_shape here could be a nested structure.
# Convert TensorShapes to plain tuples/lists here. The input could be a
# single tensor, or a nested structure of tensors.
def get_input_spec(shape):
if isinstance(shape, tensor_shape.TensorShape):
input_spec_shape = shape.as_list()
else:
input_spec_shape = list(shape)
batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
if not self.stateful:
input_spec_shape[batch_index] = None
input_spec_shape[time_step_index] = None
return InputSpec(shape=tuple(input_spec_shape))
def get_step_input_shape(shape):
if isinstance(shape, tensor_shape.TensorShape):
shape = tuple(shape.as_list())
# remove the timestep from the input_shape
return shape[1:] if self.time_major else (shape[0],) + shape[2:]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3), which comes from
# numpy inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
pass
if not nest.is_sequence(input_shape):
# This indicates that there is only one input.
if self.input_spec is not None:
self.input_spec[0] = get_input_spec(input_shape)
else:
self.input_spec = [get_input_spec(input_shape)]
step_input_shape = get_step_input_shape(input_shape)
else:
flat_input_shapes = nest.flatten(input_shape)
flat_input_shapes = nest.map_structure(get_input_spec, flat_input_shapes)
assert len(flat_input_shapes) == self._num_inputs
if self.input_spec is not None:
self.input_spec[:self._num_inputs] = flat_input_shapes
else:
self.input_spec = flat_input_shapes
step_input_shape = nest.map_structure(get_step_input_shape, input_shape)
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if _is_multiple_state(self.cell.state_size):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
self._validate_state_spec(state_size, self.state_spec)
else:
self.state_spec = [
InputSpec(shape=[None] + tensor_shape.as_shape(dim).as_list())
for dim in state_size
]
if self.stateful:
self.reset_states()
self.built = True
@staticmethod
def _validate_state_spec(cell_state_sizes, init_state_specs):
"""Validate the state spec between the initial_state and the state_size.
Args:
cell_state_sizes: list, the `state_size` attribute from the cell.
init_state_specs: list, the `state_spec` from the initial_state that is
passed in call()
Raises:
ValueError: When initial state spec is not compatible with the state size.
"""
validation_error = ValueError(
'An `initial_state` was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'however `cell.state_size` is '
'{}'.format(init_state_specs, cell_state_sizes))
if len(cell_state_sizes) == len(init_state_specs):
for i in range(len(cell_state_sizes)):
if not tensor_shape.TensorShape(
# Ignore the first axis for init_state which is for batch
init_state_specs[i].shape[1:]).is_compatible_with(
tensor_shape.TensorShape(cell_state_sizes[i])):
raise validation_error
else:
raise validation_error
def get_initial_state(self, inputs):
get_initial_state_fn = getattr(self.cell, 'get_initial_state', None)
if nest.is_sequence(inputs):
# The inputs are nested sequences. Use the first element in the sequence to
# get the batch size and dtype.
inputs = nest.flatten(inputs)[0]
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1] if self.time_major else input_shape[0]
dtype = inputs.dtype
if get_initial_state_fn:
init_state = get_initial_state_fn(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
init_state = _generate_zero_filled_state(batch_size, self.cell.state_size,
dtype)
# Keras RNN expects the states in a list, even if it's a single state tensor.
if not nest.is_sequence(init_state):
init_state = [init_state]
# Force the state to be a list in case it is a namedtuple, e.g. LSTMStateTuple.
return list(init_state)
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = _standardize_args(inputs,
initial_state,
constants,
self._num_constants,
self._num_inputs)
# In case the real inputs are a nested structure, record the number of
# flattened inputs so that we can distinguish between real inputs,
# initial_state and constants.
self._num_inputs = len(nest.flatten(inputs))
if initial_state is None and constants is None:
return super(RNN, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
additional_inputs += initial_state
self.state_spec = [
InputSpec(shape=K.int_shape(state)) for state in initial_state
]
additional_specs += self.state_spec
if constants is not None:
additional_inputs += constants
self.constants_spec = [
InputSpec(shape=K.int_shape(constant)) for constant in constants
]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
for tensor in additional_inputs:
if K.is_keras_tensor(tensor) != is_keras_tensor:
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors'
' (a "Keras tensor" is a tensor that was'
' returned by a Keras layer, or by `Input`)')
if is_keras_tensor:
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
# The original input_spec is None since there could be a nested tensor
# input. Update the input_spec to match the inputs.
full_input_spec = [None for _ in range(len(nest.flatten(inputs)))
] + additional_specs
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(RNN, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
if initial_state is not None:
kwargs['initial_state'] = initial_state
if constants is not None:
kwargs['constants'] = constants
return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
# get initial_state from the full input spec, as the inputs could be
# copied to multiple GPUs.
if self._num_constants is None:
initial_state = inputs[1:]
else:
initial_state = inputs[1:-self._num_constants]
constants = inputs[-self._num_constants:]
if len(initial_state) == 0:
initial_state = None
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError(
'Layer has ' + str(len(self.states)) + ' states but was passed ' +
str(len(initial_state)) + ' initial states.')
if nest.is_sequence(inputs):
# In the case of nested input, use the first element for shape check.
input_shape = K.int_shape(nest.flatten(inputs)[0])
else:
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if self.unroll and timesteps in [None, 1]:
raise ValueError('Cannot unroll a RNN if the '
'time dimension is undefined or equal to 1. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
# TF RNN cells expect a single tensor as state instead of a list-wrapped tensor.
is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(
inputs, states, constants=constants, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
else:
def step(inputs, states):
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(inputs, states, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps,
time_major=self.time_major)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(state_ops.assign(self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_sequences:
output = outputs
else:
output = last_output
# Properly set learning phase
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
for state in states:
state._uses_learning_phase = True
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return generic_utils.to_list(output) + states
else:
return output
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
if self.time_major:
batch_size = self.input_spec[0].shape[1]
else:
batch_size = self.input_spec[0].shape[0]
if not batch_size:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if self.states[0] is None:
if _is_multiple_state(self.cell.state_size):
self.states = [
K.zeros([batch_size] + tensor_shape.as_shape(dim).as_list())
for dim in self.cell.state_size
]
else:
self.states = [
K.zeros([batch_size] +
tensor_shape.as_shape(self.cell.state_size).as_list())
]
elif states is None:
if _is_multiple_state(self.cell.state_size):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state,
np.zeros([batch_size] +
tensor_shape.as_shape(dim).as_list()))
else:
K.set_value(self.states[0], np.zeros(
[batch_size] +
tensor_shape.as_shape(self.cell.state_size).as_list()))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if _is_multiple_state(self.cell.state_size):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != tuple([batch_size] +
tensor_shape.as_shape(dim).as_list()):
raise ValueError(
'State ' + str(index) + ' is incompatible with layer ' +
self.name + ': expected shape=' + str(
(batch_size, dim)) + ', found shape=' + str(value.shape))
# TODO(fchollet): consider batch calls to `set_value`.
K.set_value(state, value)
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'time_major': self.time_major
}
if self._num_constants is not None:
config['num_constants'] = self._num_constants
cell_config = self.cell.get_config()
config['cell'] = {
'class_name': self.cell.__class__.__name__,
'config': cell_config
}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)
num_constants = config.pop('num_constants', None)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@property
def trainable_weights(self):
if not self.trainable:
return []
if isinstance(self.cell, Layer):
return self.cell.trainable_weights
return []
@property
def non_trainable_weights(self):
if isinstance(self.cell, Layer):
if not self.trainable:
return self.cell.weights
return self.cell.non_trainable_weights
return []
@property
def losses(self):
layer_losses = super(RNN, self).losses
if isinstance(self.cell, Layer):
return self.cell.losses + layer_losses
return layer_losses
@property
def updates(self):
updates = []
if isinstance(self.cell, Layer):
updates += self.cell.updates
return updates + self._updates
@tf_export('keras.layers.SimpleRNNCell')
class SimpleRNNCell(Layer):
"""Cell class for SimpleRNN.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
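For illustration, a minimal usage sketch (assuming `keras` refers to
`tf.keras`; sizes are arbitrary):
```python
cell = keras.layers.SimpleRNNCell(32)
x = keras.Input((None, 16))
y = keras.layers.RNN(cell)(x)  # output shape: (batch, 32)
```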
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self.output_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
@tf_utils.shape_type_conversion
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0]
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
array_ops.ones_like(prev_output),
self.recurrent_dropout,
training=training)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
if dp_mask is not None:
h = K.dot(inputs * dp_mask, self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output *= rec_dp_mask
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
# Properly set learning phase on output tensor.
if 0 < self.dropout + self.recurrent_dropout:
if training is None and not context.executing_eagerly():
# This would be harmless to set in eager mode, but eager tensors
# disallow setting arbitrary attributes.
output._uses_learning_phase = True
return output, [output]
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.SimpleRNN')
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
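For illustration, a minimal usage sketch (assuming `keras` refers to
`tf.keras`; sizes are arbitrary):
```python
model = keras.Sequential([
    keras.layers.SimpleRNN(32, input_shape=(10, 16)),  # (batch, 32)
    keras.layers.Dense(1),
])
```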
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
logging.warning('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
cell = SimpleRNNCell(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(SimpleRNN, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(SimpleRNN, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
@tf_export('keras.layers.GRUCell')
class GRUCell(Layer):
"""Cell class for the GRU layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
reset_after=False,
**kwargs):
super(GRUCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.reset_after = reset_after
self.state_size = self.units
self.output_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU biases
# `(2 * 3 * self.units,)`, so that we can distinguish the classes
# when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(shape=bias_shape,
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
if not self.reset_after:
self.input_bias, self.recurrent_bias = self.bias, None
else:
self.input_bias = K.flatten(self.bias[0])
self.recurrent_bias = K.flatten(self.bias[1])
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=3)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
array_ops.ones_like(h_tm1),
self.recurrent_dropout,
training=training,
count=3)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel[:, :self.units])
x_r = K.dot(inputs_r, self.kernel[:, self.units:self.units * 2])
x_h = K.dot(inputs_h, self.kernel[:, self.units * 2:])
if self.use_bias:
x_z = K.bias_add(x_z, self.input_bias[:self.units])
x_r = K.bias_add(x_r, self.input_bias[self.units: self.units * 2])
x_h = K.bias_add(x_h, self.input_bias[self.units * 2:])
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])
recurrent_r = K.dot(h_tm1_r,
self.recurrent_kernel[:, self.units:self.units * 2])
if self.reset_after and self.use_bias:
recurrent_z = K.bias_add(recurrent_z, self.recurrent_bias[:self.units])
recurrent_r = K.bias_add(recurrent_r,
self.recurrent_bias[self.units:
self.units * 2])
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
if self.use_bias:
recurrent_h = K.bias_add(recurrent_h,
self.recurrent_bias[self.units * 2:])
recurrent_h = r * recurrent_h
else:
recurrent_h = K.dot(r * h_tm1_h,
self.recurrent_kernel[:, self.units * 2:])
hh = self.activation(x_h + recurrent_h)
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x = K.bias_add(matrix_x, self.input_bias)
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
x_h = matrix_x[:, 2 * self.units:]
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner = K.bias_add(matrix_inner, self.recurrent_bias)
else:
# hidden state projected separately for update/reset and new
matrix_inner = K.dot(h_tm1, self.recurrent_kernel[:, :2 * self.units])
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units:2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * matrix_inner[:, 2 * self.units:]
else:
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
if 0 < self.dropout + self.recurrent_dropout:
if training is None and not context.executing_eagerly():
# This would be harmless to set in eager mode, but eager tensors
# disallow setting arbitrary attributes.
h._uses_learning_phase = True
return h, [h]
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation,
'reset_after': self.reset_after
}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@tf_export('keras.layers.GRU')
class GRU(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
There are two variants. The default one is based on 1406.1078v3 and
has the reset gate applied to the hidden state before matrix multiplication.
The other one is based on the original 1406.1078v1 and has the order
reversed. The second variant is compatible with CuDNNGRU (GPU-only) and
allows inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. To use this variant, set `reset_after=True` and
`recurrent_activation='sigmoid'`.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=False,
**kwargs):
if implementation == 0:
logging.warning('`implementation=0` has been deprecated, '
'and now defaults to `implementation=1`. '
'Please update your layer call.')
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after)
super(GRU, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(GRU, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation,
'reset_after':
self.reset_after
}
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
@tf_export('keras.layers.LSTMCell')
class LSTMCell(Layer):
"""Cell class for the LSTM layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).x
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
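For illustration, a minimal usage sketch (assuming `keras` refers to
`tf.keras`; sizes are arbitrary). Note the cell carries two states, the
memory state and the carry state, which are returned when
`return_state=True`:
```python
cell = keras.layers.LSTMCell(32)
x = keras.Input((None, 16))
output, h, c = keras.layers.RNN(cell, return_state=True)(x)
```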
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = (self.units, self.units)
self.output_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=4)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
array_ops.ones_like(states[0]),
self.recurrent_dropout,
training=training,
count=4)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
x_i = K.dot(inputs_i, self.kernel[:, :self.units])
x_f = K.dot(inputs_f, self.kernel[:, self.units:self.units * 2])
x_c = K.dot(inputs_c, self.kernel[:, self.units * 2:self.units * 3])
x_o = K.dot(inputs_o, self.kernel[:, self.units * 3:])
if self.use_bias:
x_i = K.bias_add(x_i, self.bias[:self.units])
x_f = K.bias_add(x_f, self.bias[self.units:self.units * 2])
x_c = K.bias_add(x_c, self.bias[self.units * 2:self.units * 3])
x_o = K.bias_add(x_o, self.bias[self.units * 3:])
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))
f = self.recurrent_activation(
x_f + K.dot(h_tm1_f,
self.recurrent_kernel[:, self.units: self.units * 2]))
c = f * c_tm1 + i * self.activation(
x_c + K.dot(h_tm1_c,
self.recurrent_kernel[:, self.units * 2: self.units * 3]))
o = self.recurrent_activation(
x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
z = K.dot(inputs, self.kernel)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
z += K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units:2 * self.units]
z2 = z[:, 2 * self.units:3 * self.units]
z3 = z[:, 3 * self.units:]
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
if training is None and not context.executing_eagerly():
# This would be harmless to set in eager mode, but eager tensors
# disallow setting arbitrary attributes.
h._uses_learning_phase = True
return h, [h, c]
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return list(_generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype))
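# Minimal usage sketch for the LSTMCell above (shapes are illustrative
# assumptions), wrapped in the generic RNN layer defined earlier in this
# module:
#   cell = LSTMCell(4)
#   layer = RNN(cell)
#   outputs = layer(tf.keras.Input(shape=(10, 8)))  # -> shape (None, 4)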
@tf_export('keras.layers.LSTM')
class LSTM(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
Note that this cell is not optimized for performance on GPU. Please use
`tf.keras.layers.CuDNNLSTM` for better performance on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel` weights
      matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
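  Example (a minimal sketch; the input shape and unit count are illustrative
  values):
      inputs = tf.keras.Input(shape=(10, 8))      # (timesteps, features)
      outputs = tf.keras.layers.LSTM(32)(inputs)  # shape: (batch, 32)
      model = tf.keras.Model(inputs, outputs)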
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`. '
                      'Please update your layer call.')
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn('%s: Note that this layer is not optimized for performance. '
'Please use tf.keras.layers.CuDNNLSTM for better '
'performance on GPU.', self)
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(LSTM, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
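    # Clearing the cached masks above ensures that fresh dropout masks are
    # sampled on every call (i.e. for every batch) rather than reused.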
return super(LSTM, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
def _generate_dropout_mask(ones, rate, training=None, count=1):
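  # Returns `count` independent dropout masks shaped like `ones` (a list when
  # count > 1); at inference time K.in_train_phase falls back to `ones`, i.e.
  # no dropout is applied. LSTMCell.call above uses count=4, one mask per
  # gate.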
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(count)
]
return K.in_train_phase(dropped_inputs, ones, training=training)
def _standardize_args(
inputs, initial_state, constants, num_constants, num_inputs=1):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Arguments:
    inputs: Tensor or list/tuple of tensors, which may include constants
      and initial states. In that case `num_constants` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
num_constants: Expected number of constants (if constants are passed as
      part of the `inputs` list).
    num_inputs: Expected number of real input tensors (excluding
      initial_states and constants).
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
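  Example (hypothetical tensors `x`, `s1`, `s2`, `c1`): calling
  `_standardize_args([x, s1, s2, c1], None, None, num_constants=1)` returns
  `(x, [s1, s2], [c1])`.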
"""
if isinstance(inputs, list):
# There are several situations here:
# In the graph mode, __call__ will be only called once. The initial_state
# and constants could be in inputs (from file loading).
# In the eager mode, __call__ will be called twice, once during
# rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be
# model.fit/train_on_batch/predict with real np data. In the second case,
# the inputs will contain initial_state and constants, and more importantly,
# the real inputs will be in a flat list, instead of nested tuple.
#
# For either case, we will use num_inputs to split the input list, and
# restructure the real input into tuple.
assert initial_state is None and constants is None
if num_constants is not None:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if num_inputs is None:
num_inputs = 1
if len(inputs) > num_inputs:
initial_state = inputs[num_inputs:]
inputs = inputs[:num_inputs]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def _is_multiple_state(state_size):
"""Check whether the state_size contains multiple states."""
return (hasattr(state_size, '__len__') and
not isinstance(state_size, tensor_shape.TensorShape))
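  # For example, LSTMCell.state_size is the tuple (units, units), so this
  # returns True; a plain integer or a TensorShape returns False.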
def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
if inputs is not None:
batch_size = array_ops.shape(inputs)[0]
dtype = inputs.dtype
return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):
"""Generate a zero filled tensor with shape [batch_size, state_size]."""
if batch_size_tensor is None or dtype is None:
raise ValueError(
'batch_size and dtype cannot be None while constructing initial state: '
'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))
def create_zeros(unnested_state_size):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return array_ops.zeros(init_state_size, dtype=dtype)
if nest.is_sequence(state_size):
return nest.map_structure(create_zeros, state_size)
else:
return create_zeros(state_size)
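  # Example (illustrative values): for an LSTMCell with units=4 and a batch
  # size of 32, state_size is (4, 4), so this produces a pair of zero tensors,
  # each of shape (32, 4).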
repo_name: alshedivat/tensorflow
path: tensorflow/python/keras/layers/recurrent.py
language: Python
license: apache-2.0
size: 100,102