code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
"""
Functions for the analysis of integral field spectroscopy.
Author: Daniel Ruschel Dutra
Website: https://github.com/danielrd6/ifscube
"""
from numpy import *
import pyfits as pf
import spectools as st
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.integrate import trapz
from copy import deepcopy
from voronoi_2d_binning import voronoi_2d_binning
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.ndimage import gaussian_filter as gf
from scipy.integrate import trapz
from scipy.interpolate import interp1d
from scipy import ndimage
import profiles as lprof
import ppxf
import ppxf_util
def progress(x, xmax, steps=10):
    """
    Prints the progress of a loop as a percentage, reporting
    approximately `steps` times over the whole loop.

    Parameters
    ----------
    x : number
        Current iteration index.
    xmax : number
        Total number of iterations.
    steps : number
        Number of progress reports desired for the whole loop.

    Returns
    -------
    Nothing.
    """
    try:
        # Report only when x is a multiple of the step size.  A zero
        # step (xmax == 0) silently disables reporting.
        # Parenthesized print works identically in Python 2 and 3 for a
        # single argument, unlike the old print-statement form.
        if x % (xmax / steps) == 0:
            print('{:2.0f}%\r'.format(float(x) / float(xmax) * 100))
    except ZeroDivisionError:
        pass
class gmosdc:
    """
    A class for dealing with data cubes, originally written to work
    with GMOS IFU.

    Wraps a FITS data cube and provides methods for polynomial
    continuum fitting, signal-to-noise evaluation, wavelength
    projections, emission-line fitting, Voronoi binning and pPXF
    stellar kinematics.
    """
def __init__(self, fitsfile, redshift=None, vortab=None):
    """
    Initializes the class and loads basic information onto the
    object.

    Parameters:
    -----------
    fitstile : string
        Name of the FITS file containing the GMOS datacube. This
        should be the standard output from the GFCUBE task of the
        GEMINI-GMOS IRAF package.
    redshift : float
        Value of redshift (z) of the source, if no Doppler
        correction has been applied to the spectra yet.
    vortab : string
        Name of the file containing the Voronoi binning table

    Returns:
    --------
    Nothing.
    """
    # Single-extension files keep data and header in HDU 0; MEF files
    # from GFCUBE keep the science data in HDU 1 and the primary
    # header in HDU 0.
    # NOTE(review): a file with more than two HDUs leaves dataext
    # undefined and raises NameError below -- confirm expected inputs.
    if len(pf.open(fitsfile)) == 2:
        dataext, hdrext = 1,0
    elif len(pf.open(fitsfile)) == 1:
        dataext, hdrext = 0,0
    self.data = pf.getdata(fitsfile,ext=dataext)
    self.header_data = pf.getheader(fitsfile, ext=dataext)
    self.header = pf.getheader(fitsfile, ext=hdrext)
    # Wavelength coordinates are reconstructed from the WCS keywords
    # of the third (spectral) axis.
    self.wl = st.get_wl(fitsfile, hdrext=dataext, dimension=0,
        dwlkey='CD3_3', wl0key='CRVAL3', pix0key='CRPIX3')
    if redshift == None:
        try:
            redshift = self.header['REDSHIFT']
        except KeyError:
            print 'WARNING! Redshift not given and not found in the image'\
                + ' header. Using redshift = 0.'
            redshift = 0.0
    # Rest-frame wavelength used by virtually every other method.
    self.restwl = self.wl/(1.+redshift)
    try:
        # The VORBIN keyword flags a cube that was produced by the
        # voronoi_binning method of this class.
        if self.header['VORBIN'] and vortab != None:
            self.voronoi_tab = vortab
            self.binned = True
        elif self.header['VORBIN'] and vortab == None:
            print 'WARNING! Data has been binned but no binning table has'\
                + ' been given.'
            self.binned = True
    except KeyError:
        self.binned = False
    self.fitsfile = fitsfile
    self.redshift = redshift
    # (row, column) pairs for every spaxel in the cube, used by the
    # per-spectrum loops throughout the class.
    self.spec_indices = column_stack([
        ravel(indices(shape(self.data)[1:])[0]),
        ravel(indices(shape(self.data)[1:])[1])
        ])
def continuum(self, writefits=False, outimage=None, fitting_window=None,
        copts=None):
    """
    Evaluates a polynomial continuum for the whole cube and stores
    it in self.cont.

    Parameters:
    -----------
    writefits : boolean
        Writes the continuum cube to a FITS file.
    outimage : string
        Name of the output FITS file; defaults to the input name
        with a '_continuum' suffix.
    fitting_window : iterable
        Lower and upper rest-frame wavelength limits for the fit.
        NOTE(review): despite the None default this argument is
        required -- fw[0] below fails if it is not given.
    copts : dictionary
        Options passed to spectools.continuum.

    Returns:
    --------
    c : numpy.ndarray
        Continuum cube restricted to the fitting window (also
        stored in self.cont).
    """
    if self.binned:
        # For binned cubes only one spectrum per Voronoi bin is fit.
        v = loadtxt(self.voronoi_tab)
        xy = v[unique(v[:,2], return_index=True)[1],:2]
    else:
        xy = self.spec_indices
    fw = fitting_window
    fwidx = (self.restwl > fw[0]) & (self.restwl < fw[1])
    wl = deepcopy(self.restwl[fwidx])
    data = deepcopy(self.data[fwidx])
    c = zeros(shape(data), dtype='float32')
    nspec = len(xy)
    if copts == None:
        copts = {'degr':3, 'upper_threshold':2,
            'lower_threshold':2, 'niterate':5}
    try:
        copts['returns']
    except KeyError:
        # The continuum evaluated at every wl is needed below, so the
        # 'function' return mode is enforced.
        copts['returns'] = 'function'
    for k,h in enumerate(xy):
        i,j = h
        s = deepcopy(data[:,i,j])
        # Skip spectra whose edges are entirely zero (no data).
        if any(s[:20]) and any(s[-20:]):
            try:
                cont = st.continuum(wl, s, **copts)
                if self.binned:
                    # Replicate the bin's continuum over every spaxel
                    # belonging to bin k.
                    for l,m in v[v[:,2] == k,:2]:
                        c[:,l,m] = cont[1]
                else:
                    c[:,i,j] = cont[1]
            except TypeError:
                print 'Could not find a solution for {:d},{:d}.'\
                    .format(i,j)
                # NOTE(review): returning mid-loop aborts the whole
                # cube on the first failure -- confirm this is intended.
                return wl, s
        else:
            c[:,i,j] = zeros(len(wl), dtype='float32')
    self.cont = c
    if writefits:
        if outimage == None:
            outimage = self.fitsfile.replace('.fits','_continuum.fits')
        hdr = deepcopy(self.header_data)
        try:
            hdr['REDSHIFT'] = self.redshift
        except KeyError:
            hdr.append(('REDSHIFT', self.redshift,
                'Redshift used in GMOSDC'))
        # Record the fit configuration in the output header.
        hdr['CRVAL3'] = wl[0]
        hdr.append(('CONTDEGR', copts['degr'],
            'Degree of continuum polynomial'))
        hdr.append(('CONTNITE', copts['niterate'],
            'Continuum rejection iterations'))
        hdr.append(('CONTLTR', copts['lower_threshold'],
            'Continuum lower threshold'))
        hdr.append(('CONTHTR', copts['upper_threshold'],
            'Continuum upper threshold'))
        pf.writeto(outimage, data=c, header=hdr)
    return c
def snr_eval(self, wl_range=[6050,6200], copts=None):
    """
    Measures the signal to noise ratio (SNR) for each spectrum in a
    data cube, returning an image of the SNR.

    Parameters:
    -----------
    self : gmosdc instance
        gmosdc object
    wl_range : array like
        An array like object containing two wavelength coordinates
        that define the SNR window at the rest frame.
    copts : dictionary
        Options for the continuum fitting function.

    Returns:
    --------
    snr : numpy.ndarray
        Image of the SNR for each spectrum.

    Description:
    ------------
    This method evaluates the SNR for each spectrum in a data
    cube by measuring the residuals of a polynomial continuum
    fit. The function CONTINUUM of the SPECTOOLS package is used
    to provide the continuum, with zero rejection iterations
    and a 3 order polynomial.
    """
    noise = zeros(shape(self.data)[1:], dtype='float32')
    signal = zeros(shape(self.data)[1:], dtype='float32')
    snrwindow = (self.restwl >= wl_range[0]) &\
        (self.restwl <= wl_range[1])
    data = deepcopy(self.data)
    wl = self.restwl[snrwindow]
    if copts == None:
        copts = {'niterate':0, 'degr':3, 'upper_threshold':3,
            'lower_threshold':3, 'returns':'function'}
    else:
        copts['returns'] = 'function'
    for i,j in self.spec_indices:
        if any(data[snrwindow,i,j]):
            s = data[snrwindow,i,j]
            cont = st.continuum(wl, s, **copts)[1]
            # Noise is the dispersion of the residuals about the
            # continuum; signal is the mean continuum level.
            noise[i,j] = nanstd(s - cont)
            signal[i,j] = nanmean(cont)
        else:
            # Empty spectra are flagged with NaN in both images.
            noise[i,j],signal[i,j] = nan, nan
    # Results are also stored on the object for later use by
    # voronoi_binning.
    self.noise = noise
    self.signal = signal
    return array([signal,noise])
def wlprojection(self, wl0, fwhm=10, filtertype='box', writefits=False,
        outimage='wlprojection.fits'):
    """
    Writes a projection of the data cube along the wavelength
    coordinate, with the flux given by a given type of filter.

    Parameters:
    -----------
    wl0 : float
        Central wavelength at the rest frame.
    fwhm : float
        Full width at half maximum. See 'filtertype'.
    filtertype : string
        Type of function to be multiplied by the spectrum to return
        the argument for the integral.
        'box'      = Box function that is zero everywhere and 1
                     between wl0-fwhm/2 and wl0+fwhm/2.
        'gaussian' = Normalized gaussian function with center at
                     wl0 and sigma = fwhm/(2*sqrt(2*log(2)))
    writefits : boolean
        Writes the projected image to a FITS file.
    outimage : string
        Name of the output image.

    Returns:
    --------
    outim : numpy.ndarray
        Image of the filtered flux at each spaxel.

    Raises:
    -------
    ValueError
        If filtertype is not one of 'box' or 'gaussian'.
    """
    if filtertype == 'box':
        arrfilt = array( (self.restwl >= wl0-fwhm/2.) &
            (self.restwl <= wl0+fwhm/2.), dtype='float')
        # Normalize the filter to unit integral so the projection is
        # an average flux density rather than a total.
        arrfilt /= trapz(arrfilt,self.restwl)
    elif filtertype == 'gaussian':
        s = fwhm/(2.*sqrt(2.*log(2.)))
        # BUG FIX: the 1/s factor was missing, so the gaussian was not
        # normalized to unit integral as documented (and as the box
        # filter above is).
        arrfilt = 1./(s*sqrt(2.*pi))*exp(-(self.restwl-wl0)**2/2./s**2)
    else:
        # BUG FIX: previously this branch only printed a warning and
        # then crashed with NameError on the undefined arrfilt.
        raise ValueError(
            'Parameter filtertype "{:s}" not understood.'.format(filtertype))
    outim = zeros(shape(self.data)[1:], dtype='float32')
    for i,j in self.spec_indices:
        outim[i,j] = trapz(self.data[:,i,j]*arrfilt, self.restwl)
    if writefits:
        hdr = deepcopy(self.header)
        try:
            hdr['REDSHIFT'] = self.redshift
        except KeyError:
            hdr.append(('REDSHIFT', self.redshift, 'Redshift used in GMOSDC'))
        # Record the projection parameters in the output header.
        hdr.append(('WLPROJ', True, 'Processed by WLPROJECTION?'))
        hdr.append(('WLPRTYPE', filtertype,
            'Type of filter used in projection.'))
        hdr.append(('WLPRWL0', wl0, 'Central wavelength of the filter.'))
        hdr.append(('WLPRFWHM', fwhm, 'FWHM of the projection filter.'))
        pf.writeto(outimage,data=outim,header=hdr)
    return outim
def plotspec(self, x, y):
    """
    Plots the spectrum at coordinates x,y.

    Parameters
    ----------
    x,y : numbers or tuple
        If x and y are numbers plots the spectrum at the specific
        spaxel. If x and y are two element tuples plots the average
        between x[0],y[0] and x[1],y[1]

    Returns
    -------
    Nothing.
    """
    figure = plt.figure(1)
    axis = plt.axes()
    # A TypeError from len() means x and y are scalars, in which case
    # a single spaxel is plotted; otherwise the region is averaged
    # over both spatial axes.
    try:
        if len(x) == 2 and len(y) == 2:
            spec = average(
                average(self.data[:, y[0]:y[1], x[0]:x[1]], 1), 1)
    except TypeError:
        spec = self.data[:, y, x]
    axis.plot(self.restwl, spec)
    plt.show()
def linefit(self, p0, function='gaussian', fitting_window=None,
        writefits=False, outimage=None, variance=None,
        constraints=(), bounds=None, inst_disp=1.0, individual_spec=False,
        min_method='SLSQP', minopts=None, copts=None,
        refit=False, spiral_loop=False, spiral_center=None,
        fit_continuum=True, refit_radius=3):
    """
    Fits a spectral feature with a gaussian function and returns a
    map of measured properties. This is a wrapper for the scipy
    minimize function that basically iterates over the cube,
    has a formula for the reduced chi squared, and applies
    an internal scale factor to the flux.

    Parameters
    ----------
    p0 : iterable
        Initial guess for the fitting funcion, consisting of a list
        of 3N parameters for N components of **function**. In the
        case of a gaussian fucntion, these parameters must be given
        as [amplitude0, center0, sigma0, amplitude1, center1, ...].
    function : string
        The function to be fitted to the spectral features.
        Available options and respective parameters are:
            'gaussian' : amplitude, central wavelength in angstroms,
                sigma in angstroms
            'gauss_hermite' : amplitude, central wavelength in
                angstroms, sigma in angstroms, h3 and h4
    fitting_window : iterable
        Lower and upper wavelength limits for the fitting
        algorithm. These limits should allow for a considerable
        portion of continuum besides the desired spectral features.
    writefits : boolean
        Writes the results in a FITS file.
    outimage : string
        Name of the FITS file in which to write the results.
    variance : float, 1D, 2D or 3D array
        The variance of the flux measurments. It can be given
        in one of four formats. If variance is a float it is
        applied as a contant to the whole spectrum. If given as 1D
        array it assumed to be a spectrum that will be applied to
        the whole cube. As 2D array, each spaxel will be applied
        equally to all wavelenths. Finally the 3D array must
        represent the variance for each elemente of the data cube.
        It defaults to None, in which case it does not affect the
        minimization algorithm, and the returned Chi2 will be in
        fact just the fit residuals.
    inst_disp : number
        Instrumental dispersion in pixel units. This argument is
        used to evaluate the reduced chi squared. If let to default
        it is assumed that each wavelength coordinate is a degree
        of freedom. The physically sound way to do it is to use the
        number of dispersion elements in a spectrum as the degrees
        of freedom.
    bounds : sequence
        Bounds for the fitting algorithm, given as a list of
        [xmin, xmax] pairs for each x parameter.
    constraints : dict or sequence of dicts
        See scipy.optimize.minimize
    min_method : string
        Minimization method. See scipy.optimize.minimize.
    minopts : dict
        Dictionary of options to be passed to the minimization
        routine. See scipy.optimize.minimize.
    individual_spec : False or x,y pair
        Pixel coordinates for the spectrum you wish to fit
        individually.
    copts : dict
        Arguments to be passed to the spectools.continuum function.
    refit : boolean
        Use parameters from nearby sucessful fits as the initial
        guess for the next fit.
    spiral_loop : boolean
        Begins the fitting with the central spaxel and continues
        spiraling outwards.
    spiral_center : iterable
        Central coordinates for the beginning of the spiral given
        as a list of two coordinates [x0, y0]
    fit_continuum : boolean
        If True fits the continuum just before attempting to fit
        the emission lines. Setting this option to False will
        cause the algorithm to look for self.cont, which should
        contain a data cube of continua.
    refit_radius : number
        Radius in spaxels within which previous successful fits are
        averaged as the initial guess when refit is True.

    Returns
    -------
    sol : numpy.ndarray
        A data cube with the solution for each spectrum occupying
        the respective position in the image, and each position in
        the first axis giving the different parameters of the fit.

    See also
    --------
    scipy.optimize.curve_fit, scipy.optimize.leastsq
    """
    # Select the profile function and the number of parameters per
    # component.
    if function == 'gaussian':
        fit_func = lprof.gauss
        self.fit_func = lprof.gauss
        npars_pc = 3
    elif function == 'gauss_hermite':
        fit_func = lprof.gausshermite
        self.fit_func = lprof.gausshermite
        npars_pc = 5
    else:
        raise NameError('Unknown function "{:s}".'.format(function))
    if fitting_window != None:
        fw = (self.restwl > fitting_window[0]) &\
            (self.restwl < fitting_window[1])
    else:
        # Ellipsis selects the whole wavelength axis.
        fw = Ellipsis
    if copts == None:
        copts = {'niterate':5, 'degr':4, 'upper_threshold':2,
            'lower_threshold':2}
    copts['returns'] = 'function'
    # NOTE(review): a user-supplied minopts dict without an 'eps' key
    # raises KeyError here, which is not caught -- only the
    # TypeError from minopts == None is handled.
    try:
        minopts['eps']
    except TypeError:
        if minopts == None:
            minopts = {'eps': 1e-3}
        else:
            minopts['eps'] = 1e-3
    wl = deepcopy(self.restwl[fw])
    # The flux is normalized by its median to keep the minimizer in a
    # numerically comfortable range; results are scaled back at the end.
    scale_factor = median(self.data[fw,:,:])
    data = deepcopy(self.data[fw,:,:])/scale_factor
    fit_status = ones(shape(data)[1:], dtype='float32')*-1
    if len(shape(variance)) == 0:
        if variance == None:
            variance = 1.0
        else:
            # Variance scales with the square of the flux scale factor.
            variance = deepcopy(variance)/scale_factor**2
    # Expand whatever variance format was given into a full cube.
    vcube = ones(shape(data), dtype='float32')
    if len(shape(variance)) == 0:
        vcube *= variance
    elif len(shape(variance)) == 1:
        for i,j in self.spec_indices:
            vcube[:,i,j] = variance
    elif len(shape(variance)) == 2:
        for i,j in enumerate(vcube):
            vcube[i] = variance
    elif len(shape(variance)) == 3:
        vcube = variance
    npars = len(p0)
    nan_solution = array([nan for i in range(npars+1)])
    # The extra plane in sol holds the reduced chi squared.
    sol = zeros((npars+1,shape(self.data)[1], shape(self.data)[2]),
        dtype='float32')
    self.fitcont = zeros(shape(data), dtype='float32')
    self.fitwl = wl
    self.fitspec = zeros(shape(data), dtype='float32')
    self.resultspec = zeros(shape(data), dtype='float32')
    if self.binned:
        vor = loadtxt(self.voronoi_tab)
        xy = vor[unique(vor[:,2],return_index=True)[1],:2]
    else:
        xy = self.spec_indices
    # Scale factor for the flux. Needed to avoid problems with
    # the minimization algorithm.
    flux_sf = ones(npars, dtype='float32')
    flux_sf[arange(0, npars, npars_pc)] *= scale_factor
    p0 /= flux_sf
    if bounds != None:
        bounds = array(bounds)
        for i,j in enumerate(bounds):
            # In-place division keeps amplitude bounds consistent with
            # the scaled flux.
            j /= flux_sf[i]
    Y, X = indices(shape(data)[1:])
    if individual_spec:
        # Coordinates are given as (x, y) but the loop expects (y, x).
        xy = [individual_spec[::-1]]
    elif spiral_loop:
        if self.binned:
            y, x = xy[:,0], xy[:,1]
        else:
            y, x = self.spec_indices[:,0], self.spec_indices[:,1]
        if spiral_center == None:
            r = sqrt((x - x.max()/2.)**2 + (y - y.max()/2.)**2)
        else:
            r = sqrt((x - spiral_center[0])**2 + (y - spiral_center[1])**2)
        t = arctan2(y - y.max()/2., x - x.max()/2.)
        t[t < 0] += 2*pi
        # Sort the spaxels by radius first and angle second, producing
        # an outward spiral starting at the center.
        b = array([(ravel(r)[i], ravel(t)[i]) for i in\
            range(len(ravel(r)))], dtype=[('radius', 'f8'),\
            ('angle', 'f8')])
        s = argsort(b, axis=0, order=['radius', 'angle'])
        xy = column_stack([ravel(y)[s], ravel(x)[s]])
    nspec = len(xy)
    for k, h in enumerate(xy):
        progress(k, nspec, 10)
        i, j = h
        if self.binned:
            binNum = vor[(vor[:,0] == i)&(vor[:,1] == j), 2]
        # Avoids fitting if the spectrum is null at either edge.
        if ~any(data[:20,i,j]) or ~any(data[-20:,i,j]):
            sol[:,i,j] = nan_solution
            continue
        v = vcube[:,i,j]
        if fit_continuum:
            cont = st.continuum(wl, data[:,i,j], **copts)[1]
        else:
            cont = self.cont[:,i,j]/scale_factor
        s = data[:,i,j] - cont
        # Avoids fitting if the spectrum is null.
        try:
            # Residual function minimized below (chi squared if a
            # proper variance was given).
            res = lambda x : sum( (s-fit_func(self.fitwl, x))**2/v )
            if refit and k != 0:
                # Use the average of nearby successful fits as the
                # initial guess.
                radsol = sqrt((Y - i)**2 + (X - j)**2)
                nearsol = sol[:-1, (radsol < refit_radius) &\
                    (fit_status == 0)]
                if shape(nearsol) == (5, 1):
                    p0 = deepcopy(nearsol.transpose()/flux_sf)
                elif any(nearsol):
                    p0 = deepcopy(average(nearsol.transpose(), 0)/flux_sf)
            r = minimize(res, x0=p0, method=min_method, bounds=bounds,
                constraints=constraints, options=minopts)
            if r.status != 0:
                print h, r.message
            # Reduced chi squared of the fit.
            chi2 = res(r['x'])
            nu = len(s)/inst_disp - npars - 1
            red_chi2 = chi2 / nu
            p = append(r['x']*flux_sf, red_chi2)
            fit_status[i,j] = r.status
        except RuntimeError:
            print 'Optimal parameters not found for spectrum {:d},{:d}'\
                .format(int(i),int(j))
            p = nan_solution
        if self.binned:
            # Replicate the solution over every spaxel of the bin.
            for l, m in vor[vor[:,2] == binNum,:2]:
                sol[:,l,m] = p
                self.fitcont[:,l,m] = cont*scale_factor
                self.fitspec[:,l,m] = (s+cont)*scale_factor
                self.resultspec[:,l,m] = (cont+fit_func(self.fitwl,
                    r['x']))*scale_factor
        else:
            sol[:,i,j] = p
            self.fitcont[:,i,j] = cont*scale_factor
            self.fitspec[:,i,j] = (s+cont)*scale_factor
            self.resultspec[:,i,j] = (cont+fit_func(self.fitwl, r['x']))\
                *scale_factor
    self.em_model = sol
    self.fit_status = fit_status
    # Undo the in-place scaling of the caller's initial guess.
    p0 *= flux_sf
    if writefits:
        # Basic tests and first header
        if outimage == None:
            outimage = self.fitsfile.replace('.fits',
                '_linefit.fits')
        hdr = deepcopy(self.header_data)
        try:
            hdr['REDSHIFT'] = self.redshift
        except KeyError:
            hdr.append(('REDSHIFT', self.redshift,
                'Redshift used in GMOSDC'))
        # Creates MEF output.
        h = pf.HDUList()
        h.append(pf.PrimaryHDU(header=hdr))
        # Creates the fitted spectrum extension
        hdr = pf.Header()
        hdr.append(('object', 'spectrum', 'Data in this extension'))
        hdr.append(('CRPIX3', 1, 'Reference pixel for wavelength'))
        hdr.append(('CRVAL3', wl[0], 'Reference value for wavelength'))
        hdr.append(('CD3_3', average(diff(wl)),
            'CD3_3'))
        h.append(pf.ImageHDU(data=self.fitspec, header=hdr))
        # Creates the fitted continuum extension.
        hdr['object'] = 'continuum'
        h.append(pf.ImageHDU(data=self.fitcont, header=hdr))
        # Creates the fitted function extension.
        hdr['object'] = 'fit'
        h.append(pf.ImageHDU(data=self.resultspec, header=hdr))
        # Creates the solution extension.
        hdr['object'] = 'parameters'
        hdr.append(('function', function, 'Fitted function'))
        hdr.append(('nfunc', len(p)/3, 'Number of functions'))
        h.append(pf.ImageHDU(data=sol, header=hdr))
        # Creates the minimize's exit status extension
        hdr['object'] = 'status'
        h.append(pf.ImageHDU(data=fit_status, header=hdr))
        h.writeto(outimage)
    if individual_spec:
        return wl, s*scale_factor, cont*scale_factor,\
            fit_func(wl, p[:-1]), r
    else:
        return sol
def loadfit(self, fname):
    """
    Loads the result of a previous fit, and put it in the
    appropriate variables for the plotfit function.

    Parameters
    ----------
    fname : string
        Name of the FITS file generated by gmosdc.linefit.

    Returns
    -------
    Nothing.
    """
    # Wavelength coordinates come from the WCS keywords of the first
    # image extension.
    self.fitwl = st.get_wl(fname, pix0key='crpix3', wl0key='crval3',
                           dwlkey='cd3_3', hdrext=1, dataext=1)
    # Extensions follow the layout written by linefit: spectrum,
    # continuum, model, parameters.
    self.fitspec = pf.getdata(fname, ext=1)
    self.fitcont = pf.getdata(fname, ext=2)
    self.resultspec = pf.getdata(fname, ext=3)
    profile_map = {
        'gaussian': lprof.gauss,
        'gauss_hermite': lprof.gausshermite,
    }
    func_name = pf.getheader(fname, ext=4)['function']
    # An unrecognized function name leaves self.fit_func untouched,
    # matching the original behavior.
    if func_name in profile_map:
        self.fit_func = profile_map[func_name]
    self.em_model = pf.getdata(fname, ext=4)
def eqw(self, amp_index=0, center_index=1, sigma_index=2, sigma_limit=3):
    """
    Evaluates the equivalent width of a previous linefit.

    Parameters
    ----------
    amp_index, center_index, sigma_index : integers
        Indices of the amplitude, center and sigma parameters in
        self.em_model.
    sigma_limit : number
        Half-width of the integration window in units of the
        fitted sigma.

    Returns
    -------
    numpy.ndarray
        Stack of two images: the equivalent width from the fitted
        model and from the data directly.
    """
    xy = self.spec_indices
    eqw_model = zeros(shape(self.em_model)[1:], dtype='float32')
    eqw_direct = zeros(shape(self.em_model)[1:], dtype='float32')
    # Single gaussian profile rebuilt from the fitted parameters.
    fit_func = lambda x, a ,b, c: a*exp(-(x-b)**2/2./c**2)
    for i,j in xy:
        # Integration window of +/- sigma_limit sigmas around the
        # fitted line center.
        cond = (self.fitwl > self.em_model[center_index,i,j]\
            - sigma_limit*self.em_model[sigma_index,i,j])\
            & (self.fitwl < self.em_model[center_index,i,j]\
            + sigma_limit*self.em_model[sigma_index,i,j])
        fit = fit_func(self.fitwl[cond],
            *self.em_model[[amp_index, center_index, sigma_index], i, j])
        cont = self.fitcont[cond,i,j]
        eqw_model[i,j] = trapz(1. - (fit+cont)/cont, x=self.fitwl[cond])
        # NOTE(review): cond is a mask over self.fitwl (the fitting
        # window), but here it indexes the full-length self.data and
        # self.restwl -- this only works if linefit was run without a
        # fitting_window. Confirm and fix if needed.
        eqw_direct[i,j] = trapz(1. - self.data[cond,i,j]/cont,
            x=self.restwl[cond])
    return array([eqw_model,eqw_direct])
def plotfit(self, x, y):
    """
    Plots the spectrum and features just fitted.

    Parameters
    ----------
    x : number
        Horizontal coordinate of the desired spaxel.
    y : number
        Vertical coordinate of the desired spaxel.

    Returns
    -------
    Nothing.
    """
    fig = plt.figure(1)
    plt.clf()
    ax = plt.axes()
    # Last plane of em_model is the reduced chi squared, not a
    # profile parameter, hence the [:-1].
    p = self.em_model[:-1,y,x]
    c = self.fitcont[:,y,x]
    wl = self.fitwl
    f = self.fit_func
    s = self.fitspec[:,y,x]
    # Total model, continuum and observed spectrum.
    ax.plot(wl, c + f(wl, p))
    ax.plot(wl, c)
    ax.plot(wl, s)
    if self.fit_func == lprof.gauss:
        npars = 3
        parnames = ('A', 'wl', 's')
    elif self.fit_func == lprof.gausshermite:
        npars = 5
        parnames = ('A', 'wl', 's', 'h3', 'h4')
    else:
        raise NameError('Unkown fit function.')
    # Overplot each individual component when more than one was fit.
    if len(p) > npars:
        for i in arange(0, len(p), npars):
            ax.plot(wl, c + f(wl, p[i:i+npars]), 'k--')
    # Print a table of the fitted parameters, one row per component.
    pars = (npars*'{:10s}'+'\n').format(*parnames)
    for i in arange(0, len(p), npars):
        pars += (('{:10.2e}'+(npars-1)*'{:10.2f}'+'\n')\
            .format(*p[i:i+npars]))
    print pars
    plt.show()
def channelmaps(self, channels=6, lambda0=None, velmin=None, velmax=None,
        continuum_width=300, continuum_opts=None, sigma=1e-16):
    """
    Creates velocity channel maps from a data cube.

    Parameters
    ----------
    channels : integer
        Number of channel maps to build
    lambda0 : number
        Central wavelength of the desired spectral feature
    velmin : number
        Mininum velocity in kilometers per second
    velmax : number
        Maximum velocity in kilometers per second
    continuum_width : number
        Width in wavelength for the continuum evaluation window
    continuum_opts : dictionary
        Dicitionary of options to be passed to the
        spectools.continuum function
    sigma : number
        Flux threshold below which pixels are blanked (NaN) in the
        channel maps.

    Returns
    -------
    Nothing.
    """
    # Converting from velocities to wavelength
    wlmin, wlmax = lambda0*(array([velmin, velmax])/2.99792e+5 + 1.)
    wlstep = (wlmax - wlmin)/channels
    wl_limits = arange(wlmin, wlmax + wlstep, wlstep)
    side = int(ceil(sqrt(channels)))  # number of columns
    # BUG FIX: channels/side used integer division under Python 2,
    # which could drop the last row of panels (e.g. 5 channels in 3
    # columns gave 1 row instead of 2).
    otherside = int(ceil(channels/float(side)))  # number of rows
    fig = plt.figure()
    plt.clf()
    if continuum_opts == None:
        continuum_opts = {'niterate' : 3, 'degr' : 5,
            'upper_threshold' : 3, 'lower_threshold' : 3}
    cw = continuum_width
    fw = lambda0 + array([-cw/2., cw/2.])
    # BUG FIX: self.continuum receives the fit options through the
    # 'copts' dictionary; the previous call passed them as individual
    # keyword arguments, which always raised TypeError.
    cont = self.continuum(fitting_window=fw, copts=continuum_opts)
    contwl = self.wl[ (self.wl > fw[0]) & (self.wl < fw[1]) ]
    # Maps a wavelength to its pixel index in the continuum cube.
    cont_wl2pix = interp1d(contwl, arange(len(contwl)))
    for i in arange(channels):
        ax = fig.add_subplot(otherside, side, i+1)
        wl = self.restwl
        wl0, wl1 = wl_limits[i], wl_limits[i+1]
        wlc, wlwidth = average([wl0, wl1]), (wl1-wl0)
        # Continuum-subtracted flux integrated over the channel.
        f = self.wlprojection(wlc, fwhm=wlwidth, writefits=False,
            filtertype='box') - cont[int(round(cont_wl2pix(wlc)))]
        # Blank out pixels below the noise threshold.
        f[f < sigma] = nan
        ax.imshow(f, interpolation='none', aspect=1)
        # Label each panel with its central velocity in km/s.
        ax.annotate('{:.0f}'.format((wlc - lambda0)/lambda0*2.99792e+5),
            xy=(0.1, 0.8), xycoords='axes fraction', color='k')
        # Only the left column keeps y labels and only the bottom row
        # keeps x labels.
        if i%side != 0:
            ax.set_yticklabels([])
        if i/float( (otherside-1)*side ) < 1:
            ax.set_xticklabels([])
    fig.subplots_adjust(wspace=0, hspace=0)
    plt.show()
def voronoi_binning(self, targetsnr=10.0, writefits=False,
        outfile=None, clobber=False, writevortab=True):
    """
    Applies Voronoi binning to the data cube, using Cappellari's
    Python implementation.

    Parameters:
    -----------
    targetsnr : float
        Desired signal to noise ratio of the binned pixels
    writefits : boolean
        Writes a FITS image with the output of the binning.
    outfile : string
        Name of the output FITS file. If 'None' then the name of
        the original FITS file containing the data cube will be used
        as a root name, with '.bin' appended to it.
    clobber : boolean
        Overwrites files with the same name given in 'outfile'.
    writevortab : boolean
        Saves an ASCII table with the binning recipe.

    Returns:
    --------
    Nothing.
    """
    # The signal and noise images produced by snr_eval are required.
    try:
        x = shape(self.noise)
    except AttributeError:
        print 'This function requires prior execution of the snr_eval'\
            + 'method.'
        return
    # Spaxels flagged NaN by snr_eval are excluded from the binning
    # and blanked in the output cube.
    valid_spaxels = ravel(~isnan(self.signal))
    x = ravel(indices(shape(self.signal))[0])[valid_spaxels]
    y = ravel(indices(shape(self.signal))[1])[valid_spaxels]
    xnan = ravel(indices(shape(self.signal))[0])[~valid_spaxels]
    ynan = ravel(indices(shape(self.signal))[1])[~valid_spaxels]
    s, n = deepcopy(self.signal), deepcopy(self.noise)
    # Non-positive signal/noise values break the binning algorithm;
    # replace them with representative positive values.
    s[s <= 0] = average(self.signal[self.signal > 0])
    n[n <= 0] = average(self.signal[self.signal > 0])*.5
    signal, noise = ravel(s)[valid_spaxels], ravel(n)[valid_spaxels]
    binNum, xNode, yNode, xBar, yBar, sn, nPixels, scale = \
        voronoi_2d_binning(x, y, signal, noise, targetsnr, plot=1, quiet=0)
    # Table of (row, column, bin number) used by the other methods to
    # map spaxels to bins.
    v = column_stack([x, y, binNum])
    if writevortab:
        savetxt('voronoi_binning.dat', v, fmt='%.2f\t%.2f\t%d')
    binned = zeros(shape(self.data), dtype='float32')
    binned[:, xnan, ynan] = nan
    # Every spaxel of a bin receives the average spectrum of the bin.
    for i in arange(binNum.max()+1):
        samebin = v[:,2] == i
        samebin_coords = v[samebin,:2]
        binspec = average(self.data[:,samebin_coords[:,0],
            samebin_coords[:,1]], axis=1)
        for k in samebin_coords:
            binned[:,k[0],k[1]] = binspec
    if writefits:
        hdr = deepcopy(self.header_data)
        try:
            hdr['REDSHIFT'] = self.redshift
        except KeyError:
            hdr.append(('REDSHIFT', self.redshift,
                'Redshift used in GMOSDC'))
        # The VORBIN keyword is what __init__ checks to decide whether
        # a cube has been binned.
        hdr.append(('VORBIN',True,'Processed by Voronoi binning?'))
        hdr.append(('VORTSNR',targetsnr,'Target SNR for Voronoi binning.'))
        if outfile == None:
            outfile = '{:s}bin.fits'.format(self.fitsfile[:-4])
        pf.writeto(outfile,data=binned,header=hdr,clobber=clobber)
    self.binned_cube = binned
def write_binnedspec(self, dopcor=False, writefits=False):
    """
    Writes only one spectrum for each bin in a FITS file.

    Parameters
    ----------
    dopcor : boolean
        Applies a Doppler correction based on self.em_model before
        stacking the spectra.
    writefits : boolean
        Currently unused; kept for interface compatibility.

    Returns
    -------
    specs : numpy.ndarray
        One spectrum per row, one row per Voronoi bin.
    """
    xy = self.spec_indices
    # One representative spaxel per bin, found by looking for unique
    # flux values in an arbitrary wavelength plane of the binned cube.
    unique_indices = xy[unique(self.data[1400,:,:], return_index=True)[1]]
    if dopcor:
        try:
            shape(self.em_model)
        except AttributeError:
            print('ERROR! This function requires the gmosdc.em_model'
                  ' attribute to be defined.')
            return
        # BUG FIX: the original 'for k,i,j in enumerate(...)' always
        # raised ValueError, since enumerate yields (index, pair)
        # 2-tuples; the pair must be unpacked separately.
        for k, (i, j) in enumerate(unique_indices):
            # NOTE(review): em_model[0] is the first fit parameter
            # (amplitude for gaussian fits) -- confirm it really holds
            # a velocity before trusting this Doppler correction.
            z = self.em_model[0,i,j]/2.998e+5
            # BUG FIX: the spectrum at (i, j) is data[:, i, j];
            # data[i, j] sliced the cube along the wrong axes.
            interp_spec = interp1d(self.restwl/(1.+z), self.data[:,i,j])
            if k == 0:
                specs = interp_spec(self.restwl)
            else:
                specs = row_stack([specs, interp_spec(self.restwl)])
    else:
        specs = row_stack([self.data[:,i,j] for i,j in unique_indices])
    return specs
def ppxf_kinematics(self, fitting_window, base_wl, base_spec,
        base_cdelt, writefits=True, outimage=None,
        vel=0, sigma=180, fwhm_gal=2, fwhm_model=1.8, noise=0.05,
        individual_spec=False, plotfit=False, quiet=False,
        deg=4):
    """
    Executes pPXF fitting of the stellar spectrum over the whole
    data cube.

    Parameters
    ----------
    fitting_window : array-like
        Initial and final values of wavelength for fitting.
    base_wl : array
        Wavelength coordinates of the base spectra.
    base_spec : array
        Flux density coordinates of the base spectra.
    base_cdelt : number
        Step in wavelength coordinates.
    writefits : boolean
        Writes the results to a MEF FITS file.
    outimage : string
        Name of the output FITS file.
    vel : number
        Initial guess for the velocity in km/s.
    sigma : number
        Initial guess for the velocity dispersion in km/s.
        NOTE(review): this argument is overwritten below by the
        template-broadening sigma, and the starting guess is
        hard-coded to 180 -- confirm intended behavior.
    fwhm_gal, fwhm_model : numbers
        Instrumental FWHM of the galaxy spectra and of the model
        templates, respectively.
    noise : number
        Constant noise level assumed for every pixel.
    individual_spec : False or x,y pair
        Fit only the given spaxel.
    plotfit : boolean
        Plots each fit as it is performed.
    quiet : boolean
        Suppresses pPXF terminal output.
    deg : integer
        Degree of the additive polynomial used by pPXF.

    Returns
    -------
    Nothing

    Description
    -----------
    This function is merely a wrapper for Michelle Capellari's pPXF
    Python algorithm for penalized pixel fitting of stellar
    spectra.
    """
    w0, w1 = fitting_window
    fw = (self.wl >= w0)&(self.wl < w1)
    # Here we use the goodpixels as the fitting window
    gp = arange(shape(self.data)[0])[fw]
    lamRange1 = self.wl[[1, -1]]
    gal_lin = deepcopy(self.data[:,0,0])
    # A first rebinning just to obtain the velocity scale of the data.
    galaxy, logLam1, velscale = ppxf_util.log_rebin(lamRange1,
        gal_lin)
    lamRange2 = base_wl[[1,-1]]
    ssp = base_spec[0]
    sspNew, logLam2, velscale = ppxf_util.log_rebin(lamRange2, ssp,
        velscale=velscale)
    templates = empty((sspNew.size, len(base_spec)))
    # Convolve the whole Vazdekis library of spectral templates
    # with the quadratic difference between the SAURON and the
    # Vazdekis instrumental resolution. Logarithmically rebin
    # and store each template as a column in the array TEMPLATES.
    # Quadratic sigma difference in pixels Vazdekis --> SAURON
    # The formula below is rigorously valid if the shapes of the
    # instrumental spectral profiles are well approximated by
    # Gaussians.
    FWHM_dif = sqrt(fwhm_gal**2 - fwhm_model**2)
    # Sigma difference in pixels
    sigma = FWHM_dif/2.355/base_cdelt
    for j in range(len(base_spec)):
        ssp = base_spec[j]
        ssp = ndimage.gaussian_filter1d(ssp,sigma)
        sspNew, logLam2, velscale = ppxf_util.log_rebin(lamRange2, ssp,
            velscale=velscale)
        # Normalizes templates
        templates[:,j] = sspNew/median(sspNew)
    c = 299792.458
    # Velocity shift between the template and galaxy starting
    # wavelengths, passed to pPXF as vsyst.
    dv = (logLam2[0]-logLam1[0])*c # km/s
    z = exp(vel/c) - 1
    # Here the actual fit starts.
    start = [vel, 180.] # (km/s), starting guess for [V,sigma]
    # Assumes uniform noise accross the spectrum
    noise = zeros(shape(self.data)[0], dtype='float32') + noise
    if self.binned:
        vor = loadtxt(self.voronoi_tab)
        xy = vor[unique(vor[:,2],return_index=True)[1],:2]
    else:
        xy = self.spec_indices
    if individual_spec:
        # Coordinates given as (x, y); the loop expects (y, x).
        xy = [individual_spec[::-1]]
    # Four planes: V, sigma, h3, h4 (pPXF moments=4).
    ppxf_sol = zeros((4, shape(self.data)[1], shape(self.data)[2]),
        dtype='float32')
    ppxf_spec = zeros(shape(self.data), dtype='float32')
    ppxf_model = zeros(shape(ppxf_spec), dtype='float32')
    nspec = len(xy)
    for k, h in enumerate(xy):
        progress(k, nspec, 10)
        i, j = h
        if self.binned:
            binNum = vor[(vor[:,0] == i)&(vor[:,1] == j), 2]
        gal_lin = deepcopy(self.data[:,i,j])
        galaxy, logLam1, velscale = ppxf_util.log_rebin(lamRange1, gal_lin)
        # Normalize spectrum to avoid numerical issues.
        galaxy = galaxy/median(galaxy)
        # Assume constant noise per pixel here.
        # This should be changed in the future.
        # NOTE(review): the two lines below overwrite the log-rebinned
        # galaxy with the raw linear spectrum -- confirm whether the
        # rebinned or the linear spectrum should be fed to pPXF.
        galaxy = deepcopy(self.data[:,i,j])
        galaxy = galaxy/median(galaxy)
        pp = ppxf.ppxf(templates, galaxy, noise, velscale, start,
            goodpixels=gp, plot=plotfit, moments=4, degree=deg, vsyst=dv,
            quiet=quiet)
        if self.binned:
            # Replicate the solution over every spaxel of the bin.
            for l, m in vor[vor[:,2] == binNum,:2]:
                ppxf_sol[:,l,m] = pp.sol
                ppxf_spec[:,l,m] = pp.galaxy
                ppxf_model[:,l,m] = pp.bestfit
        else:
            ppxf_sol[:,i,j] = pp.sol
            ppxf_spec[:,i,j] = pp.galaxy
            ppxf_model[:,i,j] = pp.bestfit
    self.ppxf_sol = ppxf_sol
    self.ppxf_spec = ppxf_spec
    self.ppxf_model = ppxf_model
    if writefits:
        # Basic tests and first header
        if outimage == None:
            outimage = self.fitsfile.replace('.fits',
                '_ppxf.fits')
        hdr = deepcopy(self.header_data)
        try:
            hdr['REDSHIFT'] = self.redshift
        except KeyError:
            hdr.append(('REDSHIFT', self.redshift,
                'Redshift used in GMOSDC'))
        # Creates MEF output.
        h = pf.HDUList()
        h.append(pf.PrimaryHDU(header=hdr))
        # Creates the fitted spectrum extension
        hdr = pf.Header()
        hdr.append(('object', 'spectrum', 'Data in this extension'))
        hdr.append(('CRPIX3', 1, 'Reference pixel for wavelength'))
        hdr.append(('CRVAL3', self.wl[0],
            'Reference value for wavelength'))
        hdr.append(('CD3_3', average(diff(self.wl)),
            'CD3_3'))
        h.append(pf.ImageHDU(data=self.ppxf_spec, header=hdr))
        # Creates the fitted model extension.
        hdr['object'] = 'model'
        h.append(pf.ImageHDU(data=self.ppxf_model, header=hdr))
        # Creates the solution extension.
        hdr['object'] = 'parameters'
        h.append(pf.ImageHDU(data=self.ppxf_sol, header=hdr))
        h.writeto(outimage)
def lineflux(self, amplitude, sigma):
    """
    Calculates the flux in a line given the amplitude and sigma
    of the gaussian function that fits it.
    """
    # Analytic integral of a gaussian profile: A * |sigma| * sqrt(2*pi).
    return amplitude * abs(sigma) * sqrt(2. * pi)
| danielrd6/ifscube | cubetools.py | Python | gpl-3.0 | 39,584 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import json
import os
from pkg_resources import resource_filename
from nupic.algorithms.anomaly import computeRawAnomalyScore
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder
_VERBOSITY = 0  # how chatty the demo should be
_SEED = 1956  # the random seed used throughout
# Input data shipped with the nupic package.
_INPUT_FILE_PATH = resource_filename(
    "nupic.datafiles", "extra/hotgym/rec-center-hourly.csv"
)
_OUTPUT_PATH = "network-demo-output.csv"
_NUM_RECORDS = 2000

# Config field for SPRegion
SP_PARAMS = {
    "spVerbosity": _VERBOSITY,
    "spatialImp": "cpp",
    "globalInhibition": 1,
    "columnCount": 2048,
    # This must be set before creating the SPRegion
    "inputWidth": 0,
    "numActiveColumnsPerInhArea": 40,
    "seed": 1956,
    "potentialPct": 0.8,
    "synPermConnected": 0.1,
    "synPermActiveInc": 0.0001,
    "synPermInactiveDec": 0.0005,
    "maxBoost": 1.0,
}

# Config field for TPRegion
TP_PARAMS = {
    "verbosity": _VERBOSITY,
    "columnCount": 2048,
    "cellsPerColumn": 32,
    # Must match the SPRegion's columnCount above.
    "inputWidth": 2048,
    "seed": 1960,
    "temporalImp": "cpp",
    "newSynapseCount": 20,
    "maxSynapsesPerSegment": 32,
    "maxSegmentsPerCell": 128,
    "initialPerm": 0.21,
    "permanenceInc": 0.1,
    "permanenceDec": 0.1,
    "globalDecay": 0.0,
    "maxAge": 0,
    "minThreshold": 9,
    "activationThreshold": 12,
    "outputType": "normal",
    "pamLength": 3,
}
def createEncoder():
  """Build and return the MultiEncoder used by the sensor region.

  Combines a scalar encoder for the energy-consumption value with a
  date encoder for the time-of-day component of the timestamp.
  """
  multi = MultiEncoder()
  multi.addEncoder(
      "consumption",
      ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption", clipInput=True))
  multi.addEncoder(
      "timestamp",
      DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay"))
  return multi
def createNetwork(dataSource):
  """Create the Network instance.
  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an SPRegion. The SPRegion output is passed to
  a TPRegion, and an AnomalyRegion compares SP active columns against the
  TP's predicted columns to produce a raw anomaly score.
  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  network = Network()
  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()
  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()
  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource
  # Create the spatial pooler region
  # NOTE(review): this mutates the module-level SP_PARAMS dict in place, so
  # the region's input width matches the encoder's output width.
  SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
  network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(SP_PARAMS))
  # Link the SP region to the sensor input
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")
  # Add the TPRegion on top of the SPRegion
  network.addRegion("temporalPoolerRegion", "py.TPRegion",
                    json.dumps(TP_PARAMS))
  network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
  network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="topDownIn")
  # Add the AnomalyRegion on top of the TPRegion
  network.addRegion("anomalyRegion", "py.AnomalyRegion", json.dumps({}))
  network.link("spatialPoolerRegion", "anomalyRegion", "UniformLink", "",
               srcOutput="bottomUpOut", destInput="activeColumns")
  network.link("temporalPoolerRegion", "anomalyRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="predictedColumns")
  network.initialize()
  spatialPoolerRegion = network.regions["spatialPoolerRegion"]
  # Make sure learning is enabled
  spatialPoolerRegion.setParameter("learningMode", True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatialPoolerRegion.setParameter("anomalyMode", False)
  temporalPoolerRegion = network.regions["temporalPoolerRegion"]
  # Enable topDownMode to get the predicted columns output
  temporalPoolerRegion.setParameter("topDownMode", True)
  # Make sure learning is enabled (this is the default)
  temporalPoolerRegion.setParameter("learningMode", True)
  # Enable inference mode so we get predictions
  temporalPoolerRegion.setParameter("inferenceMode", True)
  # Enable anomalyMode to compute the anomaly score. This actually doesn't work
  # now so doesn't matter. We instead compute the anomaly score based on
  # topDownOut (predicted columns) and SP bottomUpOut (active columns).
  temporalPoolerRegion.setParameter("anomalyMode", True)
  return network
def runNetwork(network, writer):
  """Run the network and write output to writer.

  Runs the network one record at a time for _NUM_RECORDS iterations; after
  each iteration the record index, the consumption value read by the sensor,
  and the raw anomaly score are written out as one CSV row.

  :param network: a Network instance to run
  :param writer: a csv.writer instance to write output to
  """
  # Only the regions actually read below are looked up; the original also
  # fetched the SP/TP regions and kept a prevPredictedColumns list, none of
  # which were ever used.
  sensorRegion = network.regions["sensor"]
  anomalyRegion = network.regions["anomalyRegion"]
  for i in xrange(_NUM_RECORDS):
    # Run the network for a single iteration
    network.run(1)
    # Write out the anomaly score along with the record number and
    # consumption value.
    anomalyScore = anomalyRegion.getOutputData("rawAnomalyScore")[0]
    consumption = sensorRegion.getOutputData("sourceOut")[0]
    writer.writerow((i, consumption, anomalyScore))
if __name__ == "__main__":
  # Stream the hourly gym-energy records from the packaged CSV file.
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  network = createNetwork(dataSource)
  # Write results next to this script.
  outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_PATH)
  with open(outputPath, "w") as outputFile:
    writer = csv.writer(outputFile)
    print "Writing output to %s" % outputPath
    runNetwork(network, writer)
| tomsilver/nupic | examples/network/network_api_demo.py | Python | gpl-3.0 | 7,557 |
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from .noncvx_variable import NonCvxVariable
import cvxpy as cp
import numpy as np
class Orthog(NonCvxVariable):
    """ A variable satisfying X^TX = I. """
    def __init__(self, size, *args, **kwargs):
        # An orthogonal variable is always square: size x size.
        super().__init__(shape=(size, size), *args, **kwargs)
    def init_z(self, random):
        """Initializes the value of the replicant variable.

        The ``random`` flag is accepted for API compatibility with the
        other NonCvx variables but is ignored; z always starts at zero.
        """
        self.z.value = np.zeros(self.shape)
    def _project(self, matrix):
        """Project onto the orthogonal matrices.

        The nearest orthogonal matrix (in Frobenius norm) is obtained by
        setting every singular value of ``matrix`` to one.
        (The original docstring, copied from a rank-projection class,
        incorrectly described zeroing all but the k largest values.)
        """
        U, s, V = np.linalg.svd(matrix)
        s[:] = 1
        return U.dot(np.diag(s)).dot(V)
    # Constrain all entries to be the value in the matrix.
    def _restrict(self, matrix):
        return [self == matrix]
    def relax(self):
        """Relaxation [I X; X^T I] is PSD.
        """
        rows, cols = self.shape
        constr = super(Orthog, self).relax()
        # BUG FIX: the original referenced an undefined name ``X`` here
        # (``X.T``), which raised NameError; the variable being relaxed
        # is ``self``.
        mat = cp.bmat([[np.eye(rows), self], [self.T, np.eye(cols)]])
        return constr + [mat >> 0]
| cvxgrp/ncvx | ncvx/orthog.py | Python | gpl-3.0 | 1,674 |
import sure
import tempfile
from contents import contents
def test_file_with_long_levels():
    # Functional test: running contents() on a file whose section markers
    # nest six levels deep must prepend a table of contents in which deep
    # titles are truncated with "[...]" so the line numbers stay aligned.
    content = '''/**
 * Project X
 * Author: Jean Pimentel
 * Date: August, 2013
 */
/* > Intro */
Toc toc! Penny! Toc toc! Penny! Toc toc! Penny!
/* >> The Big Bang Theory << */
The Big Bang Theory is an American sitcom created by Chuck Lorre and Bill Prady.
/* ==>>> Characters ========================================================= */
Leonard Hofstadter, Sheldon Cooper, Howard Wolowitz, Rajesh Koothrappali, Penny
/* >>>> Production
============================================================================= */
Executive producer(s): Chuck Lorre, Bill Prady, Steven Molaro
Producer(s): Faye Oshima Belyeu
/* =>>>>> Info section: number of seasons - number of episodes
============================================================================= */
No. of seasons: 5
No. of episodes: 111
/* =>>>>>> A collection of our favorite quotes from the show <=============== */
* Sheldon: Scissors cuts paper, paper covers rock, rock crushes lizard, lizard poisons Spock, Spock smashes scissors, scissors decapitates lizard, lizard eats paper, paper disproves Spock, Spock vaporizes rock, and as it always has, rock crushes scissors.
* Sheldon: I'm not insane, my mother had me tested!
'''
    # Expected output: the original text unchanged, preceded by the TOC.
    new_content = '''/* TABLE OF CONTENTS
    Intro ............................................................... 17
        The Big Bang Theory ............................................. 20
            Characters .................................................. 23
                Production .............................................. 26
                    Info section: number of seasons - number of e[...] .. 31
                        A collection of our favorite quotes from [...] .. 36
============================================================================= */
/**
 * Project X
 * Author: Jean Pimentel
 * Date: August, 2013
 */
/* > Intro */
Toc toc! Penny! Toc toc! Penny! Toc toc! Penny!
/* >> The Big Bang Theory << */
The Big Bang Theory is an American sitcom created by Chuck Lorre and Bill Prady.
/* ==>>> Characters ========================================================= */
Leonard Hofstadter, Sheldon Cooper, Howard Wolowitz, Rajesh Koothrappali, Penny
/* >>>> Production
============================================================================= */
Executive producer(s): Chuck Lorre, Bill Prady, Steven Molaro
Producer(s): Faye Oshima Belyeu
/* =>>>>> Info section: number of seasons - number of episodes
============================================================================= */
No. of seasons: 5
No. of episodes: 111
/* =>>>>>> A collection of our favorite quotes from the show <=============== */
* Sheldon: Scissors cuts paper, paper covers rock, rock crushes lizard, lizard poisons Spock, Spock smashes scissors, scissors decapitates lizard, lizard eats paper, paper disproves Spock, Spock vaporizes rock, and as it always has, rock crushes scissors.
* Sheldon: I'm not insane, my mother had me tested!
'''
    temp = tempfile.NamedTemporaryFile()
    try:
        # contents() rewrites the file in place, so write the fixture,
        # run the tool, then re-read and compare against the expectation.
        temp.write(content)
        temp.seek(0)
        contents(temp.name)
        temp.seek(0)
        temp.read().should.be.equal(new_content)
    finally:
        temp.close()
| jeanpimentel/contents | tests/functional/test_file_with_long_levels.py | Python | gpl-3.0 | 3,294 |
"""
Functions and decorators for making sure the parameters they work on are of
iterable types.
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import functools
import numbers
def is_integral(obj):
    """Return ``True`` when *obj* is an integral number (``int``, ``bool``,
    or any other :class:`numbers.Integral` subclass)."""
    integral_type = numbers.Integral
    return isinstance(obj, integral_type)
def is_string(obj):
    """Return ``True`` when *obj* is a text string.

    Works on both Python 2 (``basestring``) and Python 3 (``str``).
    """
    try:
        string_types = basestring
    except NameError:
        string_types = str
    return isinstance(obj, string_types)
def is_iterable(obj):
    """Return ``True`` when *obj* is iterable, treating strings and dicts
    as scalars (they are deliberately excluded)."""
    if isinstance(obj, dict) or is_string(obj):
        return False
    return hasattr(obj, '__iter__')
def as_iterable(params=None):
    """
    Make sure the marked parameters are iterable. In case a single-unwrapped
    parameter is found among them (e.g. an int, string, ...), wrap it in a
    list and forward like that to the wrapped function. The marked parameters,
    if not explicitly specified, defaults to the 1st argument (``args[1]``).

    ``params`` may mix integers (positional indexes) and strings (keyword
    names). Marked keywords that the caller does not actually pass are
    simply left alone.
    """
    # set up default converter and separate positional from keyword arguments
    params = params or [1]
    indexes = [i for i in params if is_integral(i)]
    keys = [k for k in params if is_string(k)]
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # patch positional arguments, if needed
            if indexes:
                # copy `args` into a new list and wrap it's elements in a list
                # on the specified indexes, which are not iterables themselves
                args = [[x] if i in indexes and not is_iterable(x) else x
                        for (i, x) in enumerate(args)]
            # patch keyword arguments, if needed
            for key in keys:
                # BUG FIX: only touch keywords the caller actually supplied;
                # the original did ``kwargs[key]`` unconditionally and raised
                # KeyError whenever a marked keyword was omitted (e.g. left
                # at its default).
                if key in kwargs and not is_iterable(kwargs[key]):
                    kwargs[key] = [kwargs[key]]
            # invoke ``fn`` with patched parameters
            return fn(*args, **kwargs)
        return wrapper
    return decorator
| Outernet-Project/librarian | librarian/core/utils/iterables.py | Python | gpl-3.0 | 2,296 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from PyQt4 import QtCore, QtGui
import widgets
import api.character.rankadv
class NextRankDlg(QtGui.QDialog):
    """Modal dialog shown when the character is able to advance a Rank.

    Offers two choices: advance in the current school (or, when the
    character is on an alternate path, return to the previous school)
    or join a new school.
    """
    def __init__(self, pc, parent=None):
        # pc: the player character model; stored but read via the api module.
        super(NextRankDlg, self).__init__(parent)
        self.pc = pc
        self.build_ui()
        self.connect_signals()
        # self.setWindowFlags(QtCore.Qt.Tool)
        self.setWindowTitle(self.tr("L5R: CM - Advance Rank"))
    def build_ui(self):
        """Create the prompt label and the two action buttons."""
        vbox = QtGui.QVBoxLayout(self)
        vbox.addWidget(QtGui.QLabel(self.tr("""\
You can now advance your Rank,
what would you want to do?
""")))
        self.bt_go_on = QtGui.QPushButton(
            self.tr("Advance in my current school")
        )
        self.bt_new_school = QtGui.QPushButton(
            self.tr("Join a new school"))
        for bt in [self.bt_go_on, self.bt_new_school]:
            bt.setMinimumSize(QtCore.QSize(0, 38))
        vbox.addWidget(self.bt_go_on)
        vbox.addWidget(self.bt_new_school)
        vbox.setSpacing(12)
        is_path = api.data.schools.is_path(
            api.character.schools.get_current()
        )
        # check if the PC is following an alternate path
        if is_path:
            # offer to going back
            self.bt_go_on.setText(self.tr("Go back to your old school"))
    def connect_signals(self):
        """Wire the two buttons to their respective actions."""
        self.bt_go_on.clicked.connect(self.simply_go_on)
        self.bt_new_school.clicked.connect(self.join_new_school)
    def join_new_school(self):
        """Let the user pick a new school; accept the dialog on success."""
        dlg = widgets.SchoolChooserDialog(self)
        if dlg.exec_() == QtGui.QDialog.Rejected:
            return
        self.accept()
    def simply_go_on(self):
        """Advance in the current school, or leave an alternate path."""
        is_path = api.data.schools.is_path(
            api.character.schools.get_current()
        )
        # check if the PC is following an alternate path
        if is_path:
            # the PC want to go back to the old school.
            # find the first school that is not a path
            api.character.rankadv.leave_path()
        else:
            api.character.rankadv.advance_rank()
        self.accept()
def test():
    """Manual smoke test: show the dialog inside its own QApplication."""
    import sys
    app = QtGui.QApplication(sys.argv)
    # NOTE(review): pc=None is passed; build_ui reads school state through
    # the module-level api, not through pc -- confirm this still holds if
    # the dialog ever starts using self.pc directly.
    dlg = NextRankDlg(None, None)
    dlg.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    test()
| Kaniabi/l5r-character-manager-3 | l5r/dialogs/newrankdlg.py | Python | gpl-3.0 | 3,023 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid - **Generic Impact Function
on Population for Continuous Hazard.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.. todo:: Check raster is single band
"""
import numpy
from safe.impact_functions.generic\
.continuous_hazard_population.metadata_definitions import \
ContinuousHazardPopulationMetadata
from safe.impact_functions.bases.continuous_rh_continuous_re import \
ContinuousRHContinuousRE
from safe.impact_functions.impact_function_manager import ImpactFunctionManager
from safe.impact_functions.core import (
evacuated_population_needs,
population_rounding,
has_no_data)
from safe.storage.raster import Raster
from safe.utilities.i18n import tr
from safe.common.utilities import format_int
from safe.common.tables import Table, TableRow
from safe.common.utilities import create_classes, create_label, humanize_class
from safe.common.exceptions import (
FunctionParametersError, ZeroImpactException)
from safe.gui.tools.minimum_needs.needs_profile import add_needs_parameters, \
filter_needs_parameters
__author__ = 'lucernae'
__date__ = '24/03/15'
__revision__ = '$Format:%H$'
__copyright__ = ('Copyright 2014, Australia Indonesia Facility for '
'Disaster Reduction')
class ContinuousHazardPopulationFunction(ContinuousRHContinuousRE):
    # noinspection PyUnresolvedReferences
    """Plugin for impact of population as derived by continuous hazard."""
    # Static metadata describing this impact function to the registry.
    _metadata = ContinuousHazardPopulationMetadata()
    def __init__(self):
        super(ContinuousHazardPopulationFunction, self).__init__()
        self.impact_function_manager = ImpactFunctionManager()
        # AG: Use the proper minimum needs, update the parameters
        self.parameters = add_needs_parameters(self.parameters)
    def _tabulate(
            self,
            high,
            low,
            medium,
            question,
            total_impact):
        """Build the table rows for the printable (PDF) impact report.

        :param high: Rounded count of people in the high hazard class.
        :param low: Rounded count of people in the low hazard class.
        :param medium: Rounded count of people in the medium hazard class.
        :param question: The impact question for this analysis.
        :param total_impact: Rounded total of all impacted people.
        :returns: List of rows for the report table.
        """
        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('People impacted '),
                      '%s' % format_int(total_impact)],
                     header=True),
            TableRow([tr('People in high hazard area '),
                      '%s' % format_int(high)],
                     header=True),
            TableRow([tr('People in medium hazard area '),
                      '%s' % format_int(medium)],
                     header=True),
            TableRow([tr('People in low hazard area'),
                      '%s' % format_int(low)],
                     header=True)]
        return table_body
    def _tabulate_notes(
            self,
            minimum_needs,
            table_body,
            total,
            total_impact,
            no_data_warning):
        """Append notes and the minimum-needs breakdown to the report.

        :param minimum_needs: Serialized minimum-needs parameters.
        :param table_body: Rows from :meth:`_tabulate`; extended in place.
        :param total: Rounded total population in the exposure layer.
        :param total_impact: Rounded total of impacted people.
        :param no_data_warning: Whether to add the `no data` caveats.
        :returns: Tuple of (table_body, total_needs).
        """
        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Map shows population count in high, medium, and low hazard '
               'area.'),
            tr('Total population: %s') % format_int(total),
            TableRow(tr(
                'Table below shows the minimum needs for all '
                'affected people'))])
        if no_data_warning:
            table_body.extend([
                tr('The layers contained `no data`. This missing data was '
                   'carried through to the impact layer.'),
                tr('`No data` values in the impact layer were treated as 0 '
                   'when counting the affected or total population.')
            ])
        total_needs = evacuated_population_needs(
            total_impact, minimum_needs)
        for frequency, needs in total_needs.items():
            table_body.append(TableRow(
                [
                    tr('Needs should be provided %s' % frequency),
                    tr('Total')
                ],
                header=True))
            for resource in needs:
                table_body.append(TableRow([
                    tr(resource['table name']),
                    format_int(resource['amount'])]))
        return table_body, total_needs
    def run(self):
        """Plugin for impact of population as derived by continuous hazard.
        Hazard is reclassified into 3 classes based on the extrema provided
        as impact function parameters.
        Counts number of people exposed to each category of the hazard
        :returns:
          Map of population exposed to high category
          Table with number of people in each category
        """
        self.validate()
        self.prepare()
        thresholds = [
            p.value for p in self.parameters['Categorical thresholds'].value]
        # Thresholds must contain 3 thresholds
        if len(thresholds) != 3:
            raise FunctionParametersError(
                'The thresholds must consist of 3 values.')
        # Thresholds must monotonically increasing
        monotonically_increasing_flag = all(
            x < y for x, y in zip(thresholds, thresholds[1:]))
        if not monotonically_increasing_flag:
            raise FunctionParametersError(
                'Each threshold should be larger than the previous.')
        # The 3 categories
        low_t = thresholds[0]
        medium_t = thresholds[1]
        high_t = thresholds[2]
        # Extract data as numeric arrays
        hazard_data = self.hazard.layer.get_data(nan=True)  # Category
        no_data_warning = False
        if has_no_data(hazard_data):
            no_data_warning = True
        # Calculate impact as population exposed to each category
        exposure_data = self.exposure.layer.get_data(nan=True, scaling=True)
        if has_no_data(exposure_data):
            no_data_warning = True
        # Make 3 data for each zone. Get the value of the exposure if the
        # exposure is in the hazard zone, else just assign 0
        # NOTE(review): values above high_t fall in no class at all (they are
        # excluded rather than clamped) -- confirm this is intended.
        low_exposure = numpy.where(hazard_data < low_t, exposure_data, 0)
        medium_exposure = numpy.where(
            (hazard_data >= low_t) & (hazard_data < medium_t),
            exposure_data, 0)
        high_exposure = numpy.where(
            (hazard_data >= medium_t) & (hazard_data <= high_t),
            exposure_data, 0)
        impacted_exposure = low_exposure + medium_exposure + high_exposure
        # Count totals
        total = int(numpy.nansum(exposure_data))
        low_total = int(numpy.nansum(low_exposure))
        medium_total = int(numpy.nansum(medium_exposure))
        high_total = int(numpy.nansum(high_exposure))
        total_impact = high_total + medium_total + low_total
        # Check for zero impact
        if total_impact == 0:
            table_body = [
                self.question,
                TableRow(
                    [tr('People impacted'),
                     '%s' % format_int(total_impact)], header=True)]
            message = Table(table_body).toNewlineFreeString()
            raise ZeroImpactException(message)
        # Don't show digits less than a 1000
        total = population_rounding(total)
        total_impact = population_rounding(total_impact)
        low_total = population_rounding(low_total)
        medium_total = population_rounding(medium_total)
        high_total = population_rounding(high_total)
        minimum_needs = [
            parameter.serialize() for parameter in
            filter_needs_parameters(self.parameters['minimum needs'])
        ]
        table_body = self._tabulate(
            high_total, low_total, medium_total, self.question, total_impact)
        impact_table = Table(table_body).toNewlineFreeString()
        table_body, total_needs = self._tabulate_notes(
            minimum_needs, table_body, total, total_impact, no_data_warning)
        impact_summary = Table(table_body).toNewlineFreeString()
        map_title = tr('People in each hazard areas (low, medium, high)')
        # Style for impact layer
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00',
            '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
        classes = create_classes(impacted_exposure.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            if i == 1:
                label = create_label(
                    interval_classes[i],
                    tr('Low Population [%i people/cell]' % classes[i]))
            elif i == 4:
                label = create_label(
                    interval_classes[i],
                    tr('Medium Population [%i people/cell]' % classes[i]))
            elif i == 7:
                label = create_label(
                    interval_classes[i],
                    tr('High Population [%i people/cell]' % classes[i]))
            else:
                label = create_label(interval_classes[i])
            style_class['label'] = label
            style_class['quantity'] = classes[i]
            if i == 0:
                # First (zero) class is rendered fully transparent.
                transparency = 100
            else:
                transparency = 0
            style_class['transparency'] = transparency
            style_class['colour'] = colours[i]
            style_classes.append(style_class)
        style_info = dict(
            target_field=None,
            style_classes=style_classes,
            style_type='rasterStyle')
        # Create raster object and return
        raster_layer = Raster(
            data=impacted_exposure,
            projection=self.hazard.layer.get_projection(),
            geotransform=self.hazard.layer.get_geotransform(),
            name=tr('Population might %s') % (
                self.impact_function_manager.
                get_function_title(self).lower()),
            keywords={
                'impact_summary': impact_summary,
                'impact_table': impact_table,
                'map_title': map_title,
                'total_needs': total_needs},
            style_info=style_info)
        self._impact = raster_layer
        return raster_layer
| MariaSolovyeva/inasafe | safe/impact_functions/generic/continuous_hazard_population/impact_function.py | Python | gpl-3.0 | 10,346 |
"""
The LDBDClient module provides an API for connecting to and making requests of
a LDBDServer.
This module requires U{pyGlobus<http://www-itg.lbl.gov/gtg/projects/pyGlobus/>}.
This file is part of the Grid LSC User Environment (GLUE)
GLUE is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
from pycbc_glue import git_version
__date__ = git_version.date
__version__ = git_version.id
import sys
import os
import exceptions
import types
import re
import cPickle
import xml.parsers.expat
from pyGlobus import io
from pyGlobus import security
def version():
  """Return the git version id string of this module."""
  return __version__
class SimpleLWXMLParser:
  """
  A very simple LIGO_LW XML parser class that reads the only keeps
  tables that do not contain the strings sngl_ or multi_
  The class is not very robust as can have problems if the line
  breaks do not appear in the standard places in the XML file.
  """
  def __init__(self):
    """
    Constructs an instance.
    The private variable ignore_pat determines what tables we ignore.
    """
    self.__p = xml.parsers.expat.ParserCreate()
    self.__in_table = 0
    self.__silent = 0
    self.__ignore_pat = re.compile(r'.*(sngl_|multi_).*', re.IGNORECASE)
    self.__p.StartElementHandler = self.start_element
    self.__p.EndElementHandler = self.end_element
  def __del__(self):
    """
    Destroys an instance by finalizing and deleting the parser.
    """
    # BUG FIX: an expat parser object is not callable, so the original
    # ``self.__p("", 1)`` always raised TypeError; the parse must be
    # finalized with Parse("", 1) instead. An incomplete document at
    # teardown time raises ExpatError, which is harmless here.
    try:
      self.__p.Parse("", 1)
    except xml.parsers.expat.ExpatError:
      pass
    del self.__p
  def start_element(self, name, attrs):
    """
    Callback for start of an XML element. Checks to see if we are
    about to start a table that matches the ignore pattern.
    @param name: the name of the tag being opened
    @type name: string
    @param attrs: a dictionary of the attributes for the tag being opened
    @type attrs: dictionary
    """
    if name.lower() == "table":
      for attr in attrs.keys():
        if attr.lower() == "name":
          if self.__ignore_pat.search(attrs[attr]):
            self.__in_table = 1
  def end_element(self, name):
    """
    Callback for the end of an XML element. If the ignore flag is
    set, reset it so we start outputing the table again.
    @param name: the name of the tag being closed
    @type name: string
    """
    if name.lower() == "table":
      if self.__in_table:
        self.__in_table = 0
  def parse_line(self, line):
    """
    For each line we are passed, call the XML parser. Returns the
    line if we are outside one of the ignored tables, otherwise
    returns the empty string.
    @param line: the line of the LIGO_LW XML file to be parsed
    @type line: string
    @return: the line of XML passed in or the null string
    @rtype: string
    """
    self.__p.Parse(line)
    if self.__in_table:
      self.__silent = 1
    if not self.__silent:
      ret = line
    else:
      ret = ""
    if not self.__in_table:
      self.__silent = 0
    return ret
class LDBDClientException(Exception):
  """Exceptions returned by server"""
  def __init__(self,args=None):
    # NOTE(review): stores ``args`` (possibly None) directly; a legacy
    # Python 2 idiom -- Exception.args is conventionally a tuple.
    self.args = args
class LDBDClient(object):
  """
  Client for a remote LDBD server. The constructor opens a (optionally
  GSI-authenticated) socket connection; each public method (ping, query,
  insert, insertmap, insertdmt) sends one request and parses the reply.
  """
  def __init__(self, host, port, identity):
    """
    Open a connection to a LDBD Server and return an instance of
    class LDBDClient. One of the public methods can then be
    called to send a request to the server.
    @param host: the host on which the LDBD Server runs
    @type host: string
    @param port: port on which the LDBD Server listens
    @type port: integer
    @param identity: string which the LDBD Server identifies itself
    @type identity: string
    @return: Instance of LDBDClient
    """
    try:
      self.__connect__(host,port,identity)
    except Exception, e:
      # NOTE(review): bare re-raise; the bound exception is unused.
      raise
  def __del__(self):
    """
    Disconnect from the LDBD server.
    @return: None
    """
    self.__disconnect__()
  def __connect__(self,host,port,identity):
    """
    Attempt to open a connection to the LDBD Server
    using the 'host' and 'port' and expecting the server
    to identify itself with a corresponding host certificate.
    A IOException is raised if the connection cannot be made,
    but this is caught by the __init__ method above and
    turned into a LDBDClient exception.
    @param host: the host on which the LDBD Server runs
    @type host: string
    @param port: port on which the LDBD Server listens
    @type port: integer
    @param identity: string which the LDBD Server identifies itself
    @type identity: string
    @return: None
    """
    # remove the globus tcp port range environment variable if set
    try:
      port_range = os.environ["GLOBUS_TCP_PORT_RANGE"]
      os.environ["GLOBUS_TCP_PORT_RANGE"] = ""
    except:
      pass
    self.host = host
    self.port = port
    self.identity = identity
    # redirect stdout and stderror for now
    # (pyGlobus is chatty during connection setup; restored in finally)
    try:
      f = open("/dev/null", "w")
      sys.stdout = f
      sys.stderr = f
    except:
      pass
    try:
      # create TCPIOAttr instance
      clientAttr = io.TCPIOAttr()
      authData = io.AuthData()
      soc = io.GSITCPSocket()
      if identity is None:
        # try an unauthenticated connection
        clientAttr.set_authentication_mode(
          io.ioc.GLOBUS_IO_SECURE_AUTHENTICATION_MODE_NONE)
        clientAttr.set_authorization_mode(
          io.ioc.GLOBUS_IO_SECURE_AUTHORIZATION_MODE_NONE, authData)
        clientAttr.set_channel_mode(
          io.ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_CLEAR)
        clientAttr.set_delegation_mode(
          io.ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_NONE)
      else:
        # set authentication mode to be GSSAPI
        clientAttr.set_authentication_mode(
          io.ioc.GLOBUS_IO_SECURE_AUTHENTICATION_MODE_GSSAPI)
        # set expected identity
        authData.set_identity(identity)
        # set authorization, channel, and delegation modes
        clientAttr.set_authorization_mode(
          io.ioc.GLOBUS_IO_SECURE_AUTHORIZATION_MODE_IDENTITY, authData)
        clientAttr.set_channel_mode(
          io.ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_CLEAR)
        clientAttr.set_delegation_mode(
          io.ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_LIMITED_PROXY)
      soc.connect(host, port, clientAttr)
      self.socket = soc
      self.sfile = soc.makefile("rw")
    finally:
      sys.stdout = sys.__stdout__
      sys.stderr = sys.__stderr__
      f.close()
  def __disconnect__(self):
    """
    Disconnect from the LDBD Server.
    @return: None
    """
    try:
      self.socket.shutdown(2)
    except:
      pass
  def __response__(self):
    """
    Read the response sent back by the LDBD Server. Parse out the
    return code with 0 for success and non-zero for error, and then
    the list of strings representing the returned result(s).
    @return: tuple containing the integer error code and the list of
      strings representing the output from the server
    """
    f = self.sfile
    response = ""
    # Read in 512 byte chunks until there is nothing left to read.
    # This blocks until the socket is ready for reading and until
    # 512 bytes are received. If the message is less then 512 bytes
    # this will block until the server closes the socket. Since
    # the server always shuts down the socket after sending its
    # reply this should continue to work for now.
    while 1:
      input = f.read(size = 512, waitForBytes = 512)
      response += input
      if len(input) < 512: break
    # the response from the server must always end in a null byte
    try:
      if response[-1] != '\0':
        msg = "Bad server reponse format. Contact server administrator."
        raise LDBDClientException, msg
    except:
      msg = "Connection refused. The server may be down or you may not have" + \
        "authorization to access this server. Contact server administrator."
      raise LDBDClientException, msg
    # delete the last \0 before splitting into strings
    response = response[0:-1]
    try:
      # protocol: null-separated fields, first field is the return code
      stringList = response.split('\0')
      code = int(stringList[0])
      output = stringList[1:]
    except Exception, e:
      msg = "Error parsing response from server : %s" % e
      try:
        f.close()
      except:
        pass
      raise LDBDClientException, msg
    f.close()
    return code, output
  def ping(self):
    """
    Ping the LDBD Server and return any message received back as a string.
    @return: message received (may be empty) from LDBD Server as a string
    """
    msg = "PING\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
      msg = "Error pinging server %d:%s" % (ret, reply)
      raise LDBDClientException, msg
    return reply
  def query(self,sql):
    """
    Execute an SQL query on the server and fetch the resulting XML file
    back.
    @return: message received (may be empty) from LDBD Server as a string
    """
    msg = "QUERY\0" + sql + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
      msg = "Error executing query on server %d:%s" % (ret, reply)
      raise LDBDClientException, msg
    return reply
  def insert(self,xmltext):
    """
    Insert the LIGO_LW metadata in the xmltext string into the database.
    @return: message received (may be empty) from LDBD Server as a string
    """
    msg = "INSERT\0" + xmltext + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
      msg = "Error executing insert on server %d:%s" % (ret, reply)
      raise LDBDClientException, msg
    return reply
  def insertmap(self,xmltext,lfnpfn_dict):
    """
    Insert the LIGO_LW metadata in the xmltext string into the database.
    @return: message received (may be empty) from LDBD Server as a string
    """
    # the LFN->PFN mapping is pickled and sent as a second protocol field
    pmsg = cPickle.dumps(lfnpfn_dict)
    msg = "INSERTMAP\0" + xmltext + "\0" + pmsg + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
      msg = "Error executing insert on server %d:%s" % (ret, reply)
      raise LDBDClientException, msg
    return reply
  def insertdmt(self,xmltext):
    """
    Insert the LIGO_LW metadata in the xmltext string into the database.
    @return: message received (may be empty) from LDBD Server as a string
    """
    msg = "INSERTDMT\0" + xmltext + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
      msg = "Error executing insert on server %d:%s" % (ret, reply)
      raise LDBDClientException, msg
    return reply
| ligo-cbc/pycbc-glue | pycbc_glue/LDBDClient.py | Python | gpl-3.0 | 11,113 |
import html
import inflect
import titlecase
from flask import url_for
from shared.pd_exception import DoesNotExistException
from .. import APP, importing
from ..data import match
from ..view import View
@APP.route('/match/<int:match_id>/')
def show_match(match_id: int) -> str:
    """Render the page for the match with the given id."""
    return Match(match.get_match(match_id)).page()
# pylint: disable=no-self-use,too-many-instance-attributes
class Match(View):
    """View model for the match page.

    Flattens a ``match.Match`` into the attributes the template reads:
    player strings, per-game attributes and OpenGraph metadata.
    """
    def __init__(self, viewed_match: match.Match) -> None:
        super().__init__()
        if not viewed_match:
            raise DoesNotExistException()
        self.match = viewed_match
        self.id = viewed_match.id
        self.comment = viewed_match.comment
        self.format_name = viewed_match.format_name()
        # Plain text for titles, and an HTML-linked variant for the body.
        self.players_string = ' vs '.join([p.name for p in viewed_match.players])
        self.players_string_safe = ' vs '.join([player_link(p.name) for p in viewed_match.players])
        self.module_string = ', '.join([m.name for m in viewed_match.modules])
        if not viewed_match.games:
            # No games recorded: template shows the "no games" branch.
            self.no_games = True
            return
        self.game_one = viewed_match.games[0]
        self.has_game_two = False
        self.has_game_three = False
        if len(viewed_match.games) > 1:
            self.has_game_two = True
            self.game_two = viewed_match.games[1]
            if len(viewed_match.games) > 2:
                self.has_game_three = True
                self.game_three = viewed_match.games[2]
        # presumably None means the derived field was never computed for this
        # (older) record; reimporting recomputes it -- TODO confirm.
        if viewed_match.has_unexpected_third_game is None:
            importing.reimport(viewed_match)
        self.has_unexpected_third_game = viewed_match.has_unexpected_third_game
        if viewed_match.is_tournament is None:
            importing.reimport(viewed_match)
        self.is_tournament = viewed_match.is_tournament
    def og_title(self) -> str:
        """OpenGraph title: the 'A vs B' players string."""
        return self.players_string
    def og_url(self) -> str:
        """OpenGraph canonical (absolute) URL of this match page."""
        return url_for('show_match', match_id=self.id, _external=True)
    def og_description(self) -> str:
        """OpenGraph description, e.g. 'A Modern match.'."""
        p = inflect.engine()
        fmt = titlecase.titlecase(p.a(self.format_name))
        description = '{fmt} match.'.format(fmt=fmt)
        return description
def player_link(name: str) -> str:
    """Return an HTML anchor linking to the given player's person page."""
    href = html.escape(url_for('show_person', person=name))
    return '<a href="{url}">{name}</a>'.format(url=href, name=html.escape(name))
| PennyDreadfulMTG/Penny-Dreadful-Discord-Bot | logsite/views/match_view.py | Python | gpl-3.0 | 2,353 |
from pattern import Pattern
import itertools
import random
import colorsys
import time
class EqPattern(Pattern):
    """EQ-meter pattern: each tentacle displays one EQ channel as a meter
    bar in ``meter_color`` over ``background_color``; beats trigger random
    hue shifts of one of the two colours."""
    def __init__(self, meter_color=(255,100,50), background_color=(0,50,255)):
        # Meter (bar) colour kept as individual RGB channels so hue shifts
        # can reassign them in place.
        self.meter_r = meter_color[0]
        self.meter_g = meter_color[1]
        self.meter_b = meter_color[2]
        # Background colour behind the meter bar, same channel layout.
        self.bg_r = background_color[0]
        self.bg_g = background_color[1]
        self.bg_b = background_color[2]
        # TODO: delete?
        # self.register_param("meter_r", 0, 255, meter_color[0])
        # self.register_param("meter_g", 0, 255, meter_color[1])
        # self.register_param("meter_b", 0, 255, meter_color[2])
        # self.register_param("bg_r", 0, 255, background_color[0])
        # self.register_param("bg_g", 0, 255, background_color[1])
        # self.register_param("bg_b", 0, 255, background_color[2])
        # Tunable parameters (assumed signature: name, min, max, default --
        # confirm against Pattern.register_param).
        self.register_param("max_hue_shift", 0, 0.5, 0.2)
        self.register_param("beat_channel", 0, 6, 2)
        self.register_param("max_bpm", 0, 200, 100)
        # NOTE(review): default 100 lies outside the declared 0..1 range and
        # prob_shift is never read below -- verify intent.
        self.register_param("prob_shift", 0, 1, 100)
        # Earliest wall-clock time the next beat-driven hue shift may fire.
        self.next_shift = time.time()
    def meter_color(self):
        # Current meter colour as an (r, g, b) tuple.
        return (self.meter_r, self.meter_g, self.meter_b)
    def background_color(self):
        # Current background colour as an (r, g, b) tuple.
        return (self.bg_r, self.bg_g, self.bg_b)
    # TODO: put this into utils or something
    def hue_shift(self, color, hue_shift):
        """Return ``color`` (0-255 RGB tuple) with its hue shifted.

        Note the modulo applies to the shift only, so the resulting hue may
        exceed 1 -- presumably relied on colorsys wrapping it; confirm.
        """
        color_scaled = [x/255.0 for x in color]
        hsv = list(colorsys.rgb_to_hsv(color_scaled[0], color_scaled[1], color_scaled[2]))
        hsv[0] += hue_shift % 1
        return tuple([int(x*255) for x in colorsys.hsv_to_rgb(hsv[0], hsv[1], hsv[2])])
    def next_frame(self, octopus, data):
        """Render one frame: maybe hue-shift on a beat, then draw meters."""
        beat_channel = int(round(self.beat_channel))
        t = time.time()
        # Rate-limit hue shifts to max_bpm: only fire on a beat once the
        # previous shift's cool-down has expired.
        if data.beats[beat_channel] and t > self.next_shift:
            self.next_shift = t + 60.0/self.max_bpm
            shift = self.max_hue_shift*(2*random.random() - 1)
            # Coin flip: shift either the meter or the background colour.
            if int(round(random.random())):
                self.meter_r, self.meter_g, self.meter_b = self.hue_shift(self.meter_color(), shift)
            else:
                self.bg_r, self.bg_g, self.bg_b = self.hue_shift(self.background_color(), shift)
        meter_color = self.meter_color()
        background_color = self.background_color()
        # Cycle EQ channels so more tentacles than channels still all draw.
        eq = itertools.cycle(data.eq)
        for tentacle in octopus.tentacles:
            level = next(eq)
            for led_strip in tentacle.led_strips:
                pixel_colors = []
                # First n pixels show the level, the rest show background.
                n_meter_pixels = int(len(led_strip.pixels)*float(level))
                pixel_colors.extend([meter_color for x in range(n_meter_pixels)])
                n_background_pixels = len(led_strip.pixels) - n_meter_pixels
                pixel_colors.extend([background_color for x in range(n_background_pixels)])
                led_strip.put_pixels(pixel_colors)
| TheGentlemanOctopus/thegentlemanoctopus | octopus_code/core/octopus/patterns/eqPattern.py | Python | gpl-3.0 | 2,854 |
$NetBSD: patch-buildtools_wafsamba_samba__conftests.py,v 1.2 2019/11/10 17:01:58 adam Exp $
Ensure defines are strings to avoid assertion failure, some
returned values are unicode.
--- buildtools/wafsamba/samba_conftests.py.orig 2019-07-09 10:08:41.000000000 +0000
+++ buildtools/wafsamba/samba_conftests.py
@@ -97,9 +97,9 @@ def CHECK_LARGEFILE(conf, define='HAVE_L
if flag[:2] == "-D":
flag_split = flag[2:].split('=')
if len(flag_split) == 1:
- conf.DEFINE(flag_split[0], '1')
+ conf.DEFINE(str(flag_split[0]), '1')
else:
- conf.DEFINE(flag_split[0], flag_split[1])
+ conf.DEFINE(str(flag_split[0]), str(flag_split[1]))
if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1',
define,
| oposs/pkg.oetiker.ch-build | build/samba4/patches/patch-buildtools_wafsamba_samba__conftests.py | Python | gpl-3.0 | 890 |
# YouTube Video: https://www.youtube.com/watch?v=RRK0gd77Ln0
# Given a string, calculate the length of the string.
input_str = "LucidProgramming"
# Standard Pythonic way:
# print(len(input_str))
# Iterative length calculation: O(n)
# Iterative length calculation: O(n)
def iterative_str_len(input_str):
    """Return the length of input_str by counting its characters, O(n).

    Iterates over the string directly instead of the non-idiomatic
    ``for i in range(len(input_str))`` (which also defeats the point of
    computing the length by hand).
    """
    input_str_len = 0
    for _ in input_str:
        input_str_len += 1
    return input_str_len
# Recursive length calculation: O(n)
# Recursive length calculation: O(n)
def recursive_str_len(input_str):
    """Return the length of input_str recursively: len(s) = 1 + len(s[1:])."""
    return 0 if input_str == '' else 1 + recursive_str_len(input_str[1:])
# Exercise both implementations; each prints 16 for "LucidProgramming".
print(iterative_str_len(input_str))
print(recursive_str_len(input_str))
| vprusso/youtube_tutorials | algorithms/recursion/str_len.py | Python | gpl-3.0 | 615 |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import gc
import argparse
from util import learn
def get_arguments():
    """Parse and return the command-line arguments for a training run."""
    argument_parser = argparse.ArgumentParser(description='Train a model')
    # (name, options) pairs; positional arguments first, then options.
    for name, options in (
            ('data', dict(type=str, help='Training data set')),
            ('model', dict(type=str, help='Model')),
            ('--epochs', dict(type=int, default=1, help='Number of epochs')),
            ('--batch_size', dict(type=int, default=100, help='Size of a batch')),
            ('--validation', dict(type=str, default=None, help='Validation data set'))):
        argument_parser.add_argument(name, **options)
    return argument_parser.parse_args()
# Script entry point: parse CLI arguments, run training, then force a
# garbage-collection pass to release large training structures.
args = get_arguments()
learn.train(args.data, args.validation, args.model, args.epochs, args.batch_size)
gc.collect()
| patrick-winter-knime/deep-learning-on-molecules | smiles-vhts-embedding/train.py | Python | gpl-3.0 | 726 |
from builtins import str
from builtins import range
from django.http import HttpResponse,HttpResponseNotFound
from django.template import loader
from django.shortcuts import get_list_or_404
from django.shortcuts import render
from .forms import TestMessageForm
from .models import Device
from django.conf import settings
from django.contrib.auth.decorators import login_required
#from stack_configs.models import connectToLDAP,resetLDAPpassword, simpleLDAPQuery,addToLDAPGroup
from stack_configs.stack_functions import constructStatusList
from stack_configs.mqtt_functions import MqttData,TopicData,processMessages
from stack_configs.ldap_functions import createLDAPDevice,getLDAPConn,addToLDAPGroup,resetLDAPpassword,getLDAPConnWithUser
from stack_configs.influx_functions import getLastInflux
from stack_configs.mqtt_paho_functions import connectToMqtt
import string
import random
import logging
logger = logging.getLogger(__name__)
@login_required(login_url='/admin/login/?next=/admin/')
def index(request):
    """Debug/smoke-test view: runs hard-coded LDAP user and group lookups,
    prints the results to stdout, and returns a plain greeting.

    NOTE(review): 'rtamudo' is a hard-coded test uid -- confirm this view
    is development-only.
    """
    #test ldap auth
    con=getLDAPConn()
    con.search(settings.AUTH_LDAP_USERS_OU_DN, '(&(objectclass=inetOrgPerson)(uid=rtamudo))', attributes=['sn'])
    print(con.entries[0].entry_to_ldif())
    #search group
    #con.search(settings.AUTH_LDAP_GROUPS_OU_DN, '(&(objectclass=posixGroup)(cn=active))',attributes=['gidNumber','memberUid'])
    con.search(settings.AUTH_LDAP_GROUPS_OU_DN, '(&(objectclass=posixGroup)(cn=active)(memberUid=rtamudo))',attributes=['cn','gidNumber'])
    #dc=test,dc=com" "(&(cn=*)(memberUid=skimeer))
    print(con.entries[0].entry_to_ldif())
    #print(con.entries[1].entry_to_ldif())
    print ("using dicts")
    # Dump the cn of every entry returned by the last (group) search.
    for entry in con.entries:
        #print(entry.dn)
        print(entry.cn)
    return HttpResponse("Hello, world. You're at the devices index.")
@login_required(login_url='/admin/login/?next=/admin/')
def resetPsw(request, device_id):
    """Create or reset the LDAP password of one of the user's devices.

    Generates a fresh random password, tries to create the device's LDAP
    entry (adding it to the 'device' group on success) and otherwise falls
    back to resetting the existing entry's password. On success an MQTT
    test message is sent with the new credentials. Renders the result page
    in all cases.
    """
    template = loader.get_template('devices/resetPsw.html')
    # The account filter guarantees the device belongs to the logged-in user.
    mydevice = get_list_or_404(Device, id=device_id, account=request.user)[0]
    password = id_generator()
    logger.debug("device psw:%s", password)
    if createLDAPDevice(mydevice, password):
        # Brand-new LDAP entry: also register it in the 'device' group.
        addToLDAPGroup(mydevice.device_id, 'device')
        credentials_ok = True
    else:
        # Entry already exists (or creation failed): try a password reset.
        credentials_ok = resetLDAPpassword(mydevice.device_id, password)
    if credentials_ok:
        topicFormat = str(mydevice.account.id) + "." + str(mydevice.device_id) + ".*.*"
        # mydevice.account == request.user (enforced by get_list_or_404 above).
        topic = str(mydevice.account.id) + "." + str(mydevice.device_id) + ".test.1"
        logger.debug("trying to send mqtt for topic %s", topic)
        payload = {"test"}
        mqttConnResult = connectToMqtt(mydevice.device_id, password, topic, payload)
        context = _reset_psw_context(
            request,
            content="",
            username=mydevice.device_id,
            password=password,
            mqttConnResult=mqttConnResult,
            topicFormat=topicFormat,
            title='Reset device password')
    else:
        context = _reset_psw_context(
            request,
            content="We were unable to create your device password. Try a different device ID or contact your administrator",
            username=mydevice.device_id,
            password="",
            mqttConnResult=None,
            topicFormat="",
            title='Unable to reset device password')
    return HttpResponse(template.render(context, request))

def _reset_psw_context(request, content, username, password, mqttConnResult,
                       topicFormat, title):
    """Build the template context shared by every resetPsw outcome."""
    return {
        'content': content,
        'username': username,
        'password': password,
        'mqttConnResult': mqttConnResult,
        'mqttHost': settings.MQTT['host'],
        'mqttPort': settings.MQTT['port'],
        'caFilePath': settings.MQTT['path_to_ca_cert'],
        'topicFormat': topicFormat,
        'has_permission': request.user.is_authenticated,
        'is_popup': False,
        'title': title,
        'site_title': 'zibawa',
        'site_url': settings.SITE_URL
    }
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of ``size`` characters drawn from ``chars``.

    The result is used as a device password (see resetPsw), so it is drawn
    from the ``secrets`` CSPRNG rather than the non-cryptographic ``random``
    module.
    """
    import secrets  # local import: cryptographically strong randomness
    return ''.join(secrets.choice(chars) for _ in range(size))
@login_required(login_url='/admin/login/?next=/admin/')
def testMessage(request):
    """Let a logged-in user push a hand-written MQTT message through the
    same processing pipeline used for automatically received messages,
    and show the per-stage check results.
    """
    #this allows user to feed messages into the same process
    #functions that we use in the automatic mqtt processing
    status_list=constructStatusList(request)
    # if this is a POST request we need to process the form data
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = TestMessageForm(request.POST,user=request.user)
        # check whether it's valid:
        if form.is_valid():
            #check that user is sending data on their own account
            logger.debug('form valid, processing message')
            #encode utf-8 because this is the way text messages are received from rabbitmq
            testMsg = MqttData(form.cleaned_data['topic'],form.cleaned_data['message'].encode('utf-8'))
            mqttChecksList=processMessages(testMsg)
            myTopic=TopicData(form.cleaned_data['topic'])
            # NOTE(review): lastInflux is computed but never used -- confirm
            # whether it should be added to the context or removed.
            lastInflux= getLastInflux(request.user,myTopic.device,myTopic.channel)
            context = {
            'has_permission':request.user.is_authenticated,
            'is_popup':False,
            'form':form,
            'mqttChecksList':mqttChecksList,
            'title':'Send test message',
            'site_title':'zibawa',
            'status_list':status_list,
            'site_url':settings.SITE_URL
            }
            return render(request,'devices/testMessageForm.html',context)
        # Invalid POST: fall through and re-render the bound form below.
        logger.debug('form not valid, reprinting form')
    # if a GET (or any other method) we'll create a blank form
    else:
        form = TestMessageForm(user=request.user)
        logger.debug('no post data received reprinting form')
    context = {
    'has_permission':request.user.is_authenticated,
    'is_popup':False,
    'form':form,
    'title':'Send test message',
    'mqttChecksList':None,
    'site_title':'zibawa',
    'status_list':status_list,
    'site_url':settings.SITE_URL
    }
    return render(request, 'devices/testMessageForm.html', context)
def download_CA_cert(request):
    """Serve the MQTT CA certificate as a downloadable PEM attachment.

    Any failure (missing setting, unreadable file, ...) results in a 404,
    preserving the previous best-effort behaviour, but is now logged
    instead of silently swallowed.
    """
    try:
        # Context manager guarantees the handle is closed even on error;
        # also avoids shadowing the (Python 2) builtin name 'file'.
        with open(settings.MQTT['path_to_ca_cert'], 'r') as cert_file:
            cert = cert_file.read()
        response = HttpResponse(cert, content_type='application/x-pem-file')
        response['Content-Disposition'] = 'attachment; filename="zibawa_mqtt_ca_cert.pem"'
    except Exception:
        logger.exception('could not serve the MQTT CA certificate')
        response = HttpResponseNotFound('404 - Not found')
    return response
import urllib
import logging
import random
from datetime import datetime
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.db import Key
class Move(db.Model):
  """One player's move for a single duel turn (key: m:<game><turn><player>)."""
  move = db.StringProperty()  # encoded move string for this turn
  ctime = db.DateTimeProperty(auto_now_add=True)  # creation timestamp
  para = db.StringProperty()  # paralysis gesture, set via CommandSetPara
  charm_hand = db.StringProperty()  # charmed hand, set via CommandSetCharm
  charm_gesture = db.StringProperty()  # gesture forced by the charm
  has_para = db.IntegerProperty()  # 1 once paralysis data has arrived
  has_charm = db.IntegerProperty()  # 1 once charm data has arrived
class Duel(db.Model):
  """State of one duel (key: g:<duelid>)."""
  chicken = db.IntegerProperty()  # set to 1 when a player flees the duel
  ctime = db.DateTimeProperty(auto_now_add=True)  # creation timestamp
  now_turn = db.IntegerProperty()  # index of the turn currently in play
  received_count = db.IntegerProperty()  # moves received (0-2) for now_turn
  level = db.StringProperty()  # duel level, copied from the challenger's ad
class Account(db.Model):
  """Login record keyed by player name (key: n:<name>)."""
  ctime = db.DateTimeProperty(auto_now_add=True)  # creation timestamp
  nonce = db.StringProperty()  # random session id handed back to the client
  level = db.IntegerProperty()  # level supplied at login
class User(db.Model):
  """Lobby presence record keyed by session nonce (key: n:<nonce>)."""
  ctime = db.DateTimeProperty(auto_now_add=True)  # creation timestamp
  atime = db.DateTimeProperty(auto_now_add=True)  # last-activity heartbeat
  name = db.StringProperty()  # display name; matches the Account key
  level = db.IntegerProperty()  # NOTE(review): never set at creation -- verify
  state = db.IntegerProperty()  # lobby state machine value, see below
  """
  States:
  0 I'm idle.
  1 I propose a duel.
  2 Somebody accepted my challenge.
  3 I acknowledge someone's acceptance.
  4 I accepted somebody's challenge.
  9 I fled a duel.
  """
  arg = db.StringProperty()  # state-dependent payload (level or opponent name)
  duel = db.StringProperty()  # id of the duel this user is in, if any
class MainPage(webapp.RequestHandler):
def get(self):
if "" == self.request.query_string:
self.response.out.write("2")
return
cmd = self.request.get("c")
"""
if "deldeldel" == cmd:
logging.info("cleanup")
stuff = db.GqlQuery("SELECT * FROM Move")
for thing in stuff:
thing.delete()
return
"""
    def logoff(userkey):
      # Remove the User entity and its Account, each in its own transaction.
      def del_user(userkey):
        user = db.get(userkey)
        if not user:
          return None
        user.delete()
        return user
      u = db.run_in_transaction(del_user, userkey)
      if None == u:
        # Concurrent logoff/timeout already removed the user.
        logging.error("User already deleted.")
        return
      def del_acct():
        acct = db.get(Key.from_path("Account", "n:" + u.name))
        if not acct:
          logging.error("Missing account for user.")
          return
        acct.delete()
      db.run_in_transaction(del_acct)
if "l" == cmd: # Login.
name = urllib.unquote(self.request.get("a"))
b = self.request.get("b")
if "" == b:
logging.error("No level supplied.")
self.response.out.write("Error: No level supplied.")
return
level = int(b)
logging.info("login: " + name)
# TODO: Handle other bad names.
if "" == name:
logging.error("Empty name.")
self.response.out.write("Error: Empty name.")
return
def handle_login():
acct = db.get(Key.from_path("Account", "n:" + name))
if not acct:
acct = Account(key_name="n:" + name, level=level,
nonce="%X" % random.getrandbits(64))
acct.put()
return acct.nonce
else:
return ""
nonce = db.run_in_transaction(handle_login)
if "" == nonce:
self.response.out.write("Error: Name already in use.")
else:
user = User(key_name="n:" + nonce, name=name, state=0, arg="")
user.put()
self.response.out.write(nonce)
return
if "L" == cmd: # Logoff.
nonce = self.request.get("i")
logging.info("logout: " + nonce)
logoff(Key.from_path("User", "n:" + nonce))
return
if "r" == cmd: # Lobby refresh.
nonce = self.request.get("i")
def heartbeat():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user: return False, None
user.atime = datetime.now()
# Someone accepted the duel.
if 2 == user.state:
user.state = 3
user.put()
return True, user
user.put()
return False, user
flag, user = db.run_in_transaction(heartbeat)
if not user:
self.response.out.write("Error: No such user ID.")
return
if flag:
self.response.out.write("\n" + user.arg + "\n" + user.duel)
return
users = db.GqlQuery("SELECT * FROM User")
for u in users:
self.response.out.write(u.name + '\n')
self.response.out.write(unicode(u.state) + '\n')
self.response.out.write(u.arg + '\n')
if 0 == u.state or 1 == u.state:
if user.atime > u.atime and (user.atime - u.atime).seconds >= 12:
logging.info(u.name + " timeout: " + unicode((user.atime - u.atime).seconds))
logoff(u.key())
elif 9 == u.state:
# TODO: When net games become more robust, punish fleeing wizards
# with longer login bans.
if user.atime > u.atime and (user.atime - u.atime).seconds >= 4:
logging.info(u.name + " timeout: " + unicode((user.atime - u.atime).seconds))
logoff(u.key())
# TODO: Uptime user.atime in SetMove and lower timeout to a few minutes.
elif user.atime > u.atime and (user.atime - u.atime).seconds >= 2048:
logging.info(u.name + " timeout: " + unicode((user.atime - u.atime).seconds))
logoff(u.key())
return
if "n" == cmd: # New duel.
logging.info("New duel.")
a = self.request.get("a")
if "" == a:
logging.error("No level supplied.")
self.response.out.write("Error: No level supplied.")
return
level = int(a)
if level < 1 or level > 5:
logging.error("Bad level.")
self.response.out.write("Error: Bad level.")
return
nonce = self.request.get("i")
def new_duel():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user: return -2
user.atime = datetime.now()
if 0 == user.state:
user.state = 1
user.arg = a
user.put()
return 0
user.put()
return -1
status = db.run_in_transaction(new_duel)
if -2 == status:
logging.error("No such user.")
self.response.out.write("Error: No such user.")
elif -1 == status:
logging.error("User already started duel.")
self.response.out.write("Error: Already started duel.")
else:
self.response.out.write("OK")
return
if "N" == cmd: # Accept duel.
logging.info("Accept duel.")
a = urllib.unquote(self.request.get("a"))
if "" == a:
logging.error("Error: No opponent supplied.")
return
nonce = self.request.get("i")
duelid = "%X" % random.getrandbits(64)
def mark_user():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user:
return 0, "", None, -1
user.atime = datetime.now()
origstate = user.state
origarg = user.arg
# Can't accept a duel if you were advertising one and someone just
# accepted (but you don't know yet). Also can't accept a duel if
# already in one.
if 1 != user.state and 0 != user.state:
return 0, "", None, -2
user.state = 4
user.arg = a
user.duel = duelid
user.put()
return origstate, origarg, user, 0
origstate, origarg, user, status = db.run_in_transaction(mark_user)
if -1 == status:
self.response.out.write("Error: No such user ID.")
return
if -2 == status:
logging.warning("Already dueling. Ignoring.")
return
def restore():
def restore_state_arg(i, s):
user = db.get(Key.from_path("User", "n:" + nonce))
if user:
user.state = i
user.arg = s
user.put()
db.run_in_transaction(restore_state_arg, origstate, origarg)
return
acct = db.get(Key.from_path("Account", "n:" + a))
if not acct:
restore()
self.response.out.write("Error: Opponent unavailable.")
return
def accept_duel():
opp = db.get(Key.from_path("User", "n:" + acct.nonce))
if not opp: return ""
if 1 != opp.state: return ""
opp.state = 2
level = opp.arg
opp.arg = user.name
opp.duel = duelid
opp.put()
return level
level = db.run_in_transaction(accept_duel)
if "" == level:
self.response.out.write("Error: Opponent unavailable.")
restore()
logging.error("accept_duel failed.")
return
duel = Duel(key_name = "g:" + duelid,
level = level,
now_turn = 0,
received_count = 0)
duel.put()
self.response.out.write(duelid)
logging.info("Response: " + duelid)
return
gamename = self.request.get("g")
if "f" == cmd:
logging.info("Game " + gamename + " finished.")
nonce = self.request.get("i")
def restate_user():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user:
return None
user.atime = datetime.now()
user.state = 0
user.put()
return user
user = db.run_in_transaction(restate_user)
if not user:
self.response.out.write("Error: No such user ID.")
else:
self.response.out.write("OK")
def del_game():
game = Duel.get_by_key_name("g:" + gamename)
if game:
game.delete() # TODO: Also delete moves.
db.run_in_transaction(del_game)
return
game = Duel.get_by_key_name("g:" + gamename)
if not game:
logging.error("No such game: " + gamename)
self.response.out.write("Error: No such game.")
return
gamekey = game.key()
playerid = self.request.get("i")
if "D" == cmd:
def set_chicken():
game = db.get(gamekey)
if game:
game.chicken = 1
game.put()
db.run_in_transaction(set_chicken)
logging.info(gamename + ":" + playerid + " flees!")
def chicken_user():
user = db.get(Key.from_path("User", "n:" + playerid))
if not user:
return None
user.atime = datetime.now()
user.state = 9
user.put()
return user
db.run_in_transaction(chicken_user)
#logoff(Key.from_path("User", "n:" + playerid))
self.response.out.write("Chicken!")
return
if ('0' != playerid) and ('1' != playerid):
logging.error("Bad player ID.")
self.response.out.write("Error: Bad player ID.")
return
def CommandSetMove():
turn_index = self.request.get("j")
if "" == turn_index:
logging.error("Error: No turn index.")
return
a = self.request.get("a")
if "" == a:
logging.error("Error: Bad move.")
return
logging.info("SetMove " + gamename + ":" + turn_index +
":" + playerid + " " + a)
moveid = "m:" + gamename + turn_index + playerid
move = Move.get_by_key_name(moveid)
if move:
logging.warning("Move sent twice: ignored.")
self.response.out.write("OK")
return
else:
move = Move(key_name = moveid,
has_charm = 0,
has_para = 0)
move.move = a
move.put()
turn_int = int(turn_index)
def increment_received_count():
game = db.get(gamekey)
if game.now_turn == turn_int:
if 2 == game.received_count:
logging.error("received_count > 2!")
else:
game.received_count = game.received_count + 1
elif game.now_turn == turn_int - 1:
if 2 > game.received_count:
logging.error("incrementing turn though received_count < 2!")
game.now_turn = turn_int
game.received_count = 1
elif game.now_turn > turn_int:
logging.error("received ancient move!")
elif game.now_turn < turn_int - 1:
logging.error("received future move!")
game.put()
db.run_in_transaction(increment_received_count)
logging.info("rcount " + unicode(db.get(gamekey).received_count))
self.response.out.write("OK")
    def CommandGetMove():
      # Return the opponent's move for the requested turn, '-' if it is not
      # yet available, or 'CHICKEN' if the opponent fled.
      if game.chicken:
        self.response.out.write('CHICKEN')
        # TODO: Destroy this game.
        return
      turn_index = self.request.get("j")
      if "" == turn_index:
        logging.error("Error: No turn index.")
        return
      turn_int = int(turn_index)
      # The move may only be revealed once both moves for the turn are in.
      if game.now_turn > turn_int or (game.now_turn == turn_int and 2 == game.received_count):
        logging.info("GetMove " + gamename + ":" + turn_index +
            ":" + playerid + " " + unicode(game.received_count))
        moveid = "m:" + gamename + turn_index + unicode(1 - int(playerid))
        move = Move.get_by_key_name(moveid)
        if not move:
          logging.error('Error: Cannot find move!')
        else:
          self.response.out.write(move.move)
      else:
        self.response.out.write('-')
      return
def CommandSetPara():
turn_index = self.request.get("j")
if "" == turn_index:
logging.error("Error: No turn index.")
return
target = self.request.get("a")
if "" == target:
logging.error("Error: Bad paralysis target.")
return
if "0" == target:
targetid = playerid
else:
targetid = unicode(1 - int(playerid))
gesture = self.request.get("b")
if "" == gesture:
logging.error("Error: Bad paralysis gesture.")
return
moveid = "m:" + gamename + turn_index + targetid
logging.info("SetPara " + moveid)
move = Move.get_by_key_name(moveid)
if not move:
logging.error('Error: Cannot find move!')
return
if (1 == move.has_para):
logging.error("Error: Already received paralysis.")
return
def put_para(key):
move = db.get(key)
move.para = gesture
move.has_para = 1
move.put()
db.run_in_transaction(put_para, move.key())
self.response.out.write("OK")
return
def CommandGetPara():
turn_index = self.request.get("j")
if "" == turn_index:
logging.error("Error: No turn index.")
return
target = self.request.get("a")
if "" == target:
logging.error("Error: Bad paralysis target.")
return
if "0" == target:
targetid = playerid
else:
targetid = unicode(1 - int(playerid))
moveid = "m:" + gamename + turn_index + targetid
move = Move.get_by_key_name(moveid)
if not move:
logging.error('Error: Cannot find move!')
return
if 0 == move.has_para:
self.response.out.write("-")
else:
self.response.out.write(move.para)
return
def CommandSetCharm():
turn_index = self.request.get("j")
if "" == turn_index:
logging.error("Error: No turn index.")
return
# This is unnecessary as we always assume target is opponent.
target = self.request.get("a")
if "" == target:
self.response.out.write("Error: Bad charm target.")
return
s = self.request.get("b")
if "" == s:
self.response.out.write("Error: Bad charm choices.")
return
logging.info("SetCharm " + gamename + ":" + playerid + " " + target + " " + s)
moveid = "m:" + gamename + turn_index + unicode(1 - int(playerid))
logging.info("Charm " + moveid)
move = Move.get_by_key_name(moveid)
if not move:
self.response.out.write('Error: Cannot find move!')
return
if (1 == move.has_charm):
self.response.out.write("Error: Already received charm.")
return
def put_charm(key):
move = db.get(key)
move.charm_hand = s[0]
move.charm_gesture = s[1]
move.has_charm = 1
move.put()
db.run_in_transaction(put_charm, move.key())
self.response.out.write("OK")
return
    def CommandGetCharmHand():
      # Return the charm hand stored on the requesting player's own move,
      # or '-' if no charm has been received yet.
      turn_index = self.request.get("j")
      if "" == turn_index:
        logging.error("Error: No turn index.")
        return
      moveid = "m:" + gamename + turn_index + playerid
      move = Move.get_by_key_name(moveid)
      if not move:
        logging.error('Error: Cannot find move!')
        return
      if 0 == move.has_charm:
        self.response.out.write("-")
      else:
        self.response.out.write(move.charm_hand)
      return
    def CommandGetCharmGesture():
      # Return the charm gesture stored on the requesting player's own move,
      # or '-' if no charm has been received yet.
      turn_index = self.request.get("j")
      if "" == turn_index:
        logging.error("Error: No turn index.")
        return
      moveid = "m:" + gamename + turn_index + playerid
      move = Move.get_by_key_name(moveid)
      if not move:
        logging.error('Error: Cannot find move!')
        return
      if 0 == move.has_charm:
        self.response.out.write("-")
      else:
        self.response.out.write(move.charm_gesture)
      return
    def CommandBad():
      # Fallback handler for unrecognised command codes.
      logging.error("Error: Bad command.")
      return
{'m' : CommandSetMove,
'g' : CommandGetMove,
'p' : CommandSetPara,
'q' : CommandGetPara,
'C' : CommandSetCharm,
'H' : CommandGetCharmHand,
'G' : CommandGetCharmGesture,
}.get(cmd, CommandBad)()
# WSGI application: every request is handled by MainPage.
application = webapp.WSGIApplication(
    [('/', MainPage)],
    debug=True)
def main():
  # App Engine CGI entry point.
  run_wsgi_app(application)
if __name__ == "__main__":
  main()
| blynn/spelltapper | app/spelltapper.py | Python | gpl-3.0 | 15,729 |
# coding: utf-8
# Notebook export: fit a linear regression predicting blog hits from
# advertising spend on three channels (Facebook, Twitter, Google).
# In[2]:
# Import and read the dataset
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
# NOTE(review): hard-coded absolute Windows path -- only runs on the
# original author's machine.
dataset = pd.read_csv("C://Users//Koyel//Desktop/MieRobotAdvert.csv")
dataset.head()
# In[3]:
dataset.describe()
# In[4]:
dataset.columns
# In[5]:
# Exploratory plots: pairwise scatter and correlation heatmap.
import seaborn as sns
get_ipython().magic('matplotlib inline')
sns.pairplot(dataset)
# In[6]:
sns.heatmap(dataset.corr())
# In[7]:
dataset.columns
# In[8]:
# Features: ad spend per channel; target: page hits.
X = dataset[['Facebook', 'Twitter', 'Google']]
y = dataset['Hits']
# In[9]:
from sklearn.model_selection import train_test_split
# In[10]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
# In[11]:
from sklearn.linear_model import LinearRegression
# In[12]:
lm = LinearRegression()
# In[13]:
lm.fit(X_train,y_train)
# In[14]:
print(lm.intercept_)
# In[15]:
coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Calculated Coefficient'])
coeff_df
# In[17]:
predictions = lm.predict(X_test)
# In[26]:
plt.ylabel("likes predicted")
plt.title("Likes predicated for MieRobot.com blogs",color='r')
plt.scatter(y_test,predictions)
# In[23]:
# NOTE(review): this prints the bound method object, not a score --
# probably meant lm.score(X_test, y_test).
print (lm.score)
# In[19]:
# Residual distribution; roughly normal residuals suggest a decent fit.
sns.distplot((y_test-predictions),bins=50);
# In[20]:
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# In[ ]:
| MieRobot/Blogs | Blog_LinearRegression.py | Python | gpl-3.0 | 1,518 |
#!/usr/bin/python
from src.sqllist import get_sql
_UPDATER_EXTRAS = {
'runningcatalog': ['runcatid'],
'runningcatalog_fluxes': ['runcat_id', 'band', 'stokes'],
}
def _refactor_update(sql):
"""
Special refactoring for MonetDB update..from imitation.
"""
def _get_extra_conditions(tabname):
return ' '.join(map(lambda x: 'and {0}.{1} = x.{1}'.format(tabname, x),
_UPDATER_EXTRAS[tabname]))
sqlupdate, sqlfrom = sql.strip().split('from', 1)
table, sqlupd_list = sqlupdate.split('set')
sqlupd_list = sqlupd_list.split(',')
table = table.split()[1]
if sqlfrom.endswith(';'):
sqlfrom = sqlfrom[:-1]
sqlfrom_split = sqlfrom.split('where', 1)
if len(sqlfrom_split) > 1:
[sqlfrom2, sqlwhere] = sqlfrom_split
sqlwhere = 'where %s' % sqlwhere
else:
sqlfrom2 = sqlfrom
sqlwhere = ''
for field in _UPDATER_EXTRAS[table]:
sqlwhere = sqlwhere.replace('%s.%s' % (table, field), 'x.%s' % field)
update_field = []
for sqlf in sqlupd_list:
field, update_stmt = sqlf.split('=')
update_field.append('%s = (select %s from %s x, %s %s %s)' % (
field, update_stmt.replace(table, 'x'),
table, sqlfrom2, sqlwhere,
_get_extra_conditions(table)))
result = []
for field in update_field:
result.append("""update %s set %s
where exists (select 1 from %s);""" % (table, field, sqlfrom))
return result
def run_update(conn, sql_name, *params):
    """
    Run update on a given connection. Refactor it for MonetDB if needed.
    """
    statement = get_sql(sql_name, *params)
    if not conn.is_monet():
        conn.execute(statement)
    else:
        # MonetDB cannot run UPDATE ... FROM directly; run the rewritten set.
        conn.execute_set(_refactor_update(statement))
| jjdmol/LOFAR | CEP/GSM/bremen/src/updater.py | Python | gpl-3.0 | 1,838 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import datetime, math
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.hr.doctype.payroll_entry.payroll_entry import get_start_end_dates
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.utilities.transaction_base import TransactionBase
from frappe.utils.background_jobs import enqueue
from erpnext.hr.doctype.additional_salary.additional_salary import get_additional_salary_component
from erpnext.hr.doctype.payroll_period.payroll_period import get_period_factor, get_payroll_period
from erpnext.hr.doctype.employee_benefit_application.employee_benefit_application import get_benefit_component_amount
from erpnext.hr.doctype.employee_benefit_claim.employee_benefit_claim import get_benefit_claim_amount, get_last_payroll_period_benefits
class SalarySlip(TransactionBase):
	def __init__(self, *args, **kwargs):
		super(SalarySlip, self).__init__(*args, **kwargs)
		# Per-employee naming series consumed by autoname().
		self.series = 'Sal Slip/{0}/.#####'.format(self.employee)
		# Names available to salary-component condition/formula evaluation;
		# "long" maps to int for backward compatibility with Py2 formulas.
		self.whitelisted_globals = {
			"int": int,
			"float": float,
			"long": int,
			"round": round,
			"date": datetime.date,
			"getdate": getdate
		}
	def autoname(self):
		# Document name follows the per-employee series set in __init__.
		self.name = make_autoname(self.series)
def validate(self):
    """Runs on every save: derive status, validate the period, block
    duplicates, load components if empty, and compute net pay."""
    self.status = self.get_status()
    self.validate_dates()
    self.check_existing()
    if not self.salary_slip_based_on_timesheet:
        self.get_date_details()
    if not (len(self.get("earnings")) or len(self.get("deductions"))):
        # get details from salary structure
        self.get_emp_and_leave_details()
    else:
        self.get_leave_details(lwp = self.leave_without_pay)
    self.calculate_net_pay()
    company_currency = erpnext.get_company_currency(self.company)
    self.total_in_words = money_in_words(self.rounded_total, company_currency)
    if frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet"):
        max_working_hours = frappe.db.get_single_value("HR Settings", "max_working_hours_against_timesheet")
        if self.salary_slip_based_on_timesheet and (self.total_working_hours > int(max_working_hours)):
            # Warning only (alert popup); excessive hours do not block saving.
            frappe.msgprint(_("Total working hours should not be greater than max working hours {0}").
                format(max_working_hours), alert=True)
def on_submit(self):
    """On submit: forbid negative net pay, stamp linked records, and
    optionally email the slip to the employee."""
    if self.net_pay < 0:
        frappe.throw(_("Net Pay cannot be less than 0"))
    else:
        self.set_status()
        self.update_status(self.name)
        self.update_salary_slip_in_additional_salary()
    # Skip individual mailing when driven by a Payroll Entry run.
    if (frappe.db.get_single_value("HR Settings", "email_salary_slip_to_employee")) and not frappe.flags.via_payroll_entry:
        self.email_salary_slip()
def on_cancel(self):
    """On cancel: reset status and unlink this slip from timesheets and
    Additional Salary records."""
    self.set_status()
    self.update_status()
    self.update_salary_slip_in_additional_salary()
def on_trash(self):
    """Roll back the naming-series counter if this was the latest slip in it."""
    from frappe.model.naming import revert_series_if_last
    revert_series_if_last(self.series, self.name)
def get_status(self):
    """Map the document's docstatus to a display status.

    Returns:
        str: "Draft" (docstatus 0), "Submitted" (1) or "Cancelled" (2).

    Raises:
        frappe.ValidationError: for an unexpected docstatus value.
    """
    status_map = {0: "Draft", 1: "Submitted", 2: "Cancelled"}
    if self.docstatus not in status_map:
        # Bug fix: previously `status` was referenced unassigned for any
        # docstatus outside 0/1/2, raising UnboundLocalError; fail with a
        # clear message instead.
        frappe.throw(_("Invalid docstatus {0}").format(self.docstatus))
    return status_map[self.docstatus]
def validate_dates(self):
    """Abort validation when the period end precedes the period start."""
    period_length = date_diff(self.end_date, self.start_date)
    if period_length < 0:
        frappe.throw(_("To date cannot be before From date"))
def check_existing(self):
    """Block duplicate salary slips for the same employee and period.

    For timesheet-based slips, instead verify that none of the attached
    timesheets has already been paid out (status "Payrolled").
    """
    if not self.salary_slip_based_on_timesheet:
        ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
                    where start_date = %s and end_date = %s and docstatus != 2
                    and employee = %s and name != %s""",
                    (self.start_date, self.end_date, self.employee, self.name))
        if ret_exist:
            # Bug fix: the employee field was cleared *before* the message
            # was formatted, so the error always displayed a blank employee.
            employee = self.employee
            self.employee = ''
            frappe.throw(_("Salary Slip of employee {0} already created for this period").format(employee))
    else:
        for data in self.timesheets:
            if frappe.db.get_value('Timesheet', data.time_sheet, 'status') == 'Payrolled':
                frappe.throw(_("Salary Slip of employee {0} already created for time sheet {1}").format(self.employee, data.time_sheet))
def get_date_details(self):
    """Fill start/end dates from the payroll frequency when end_date is unset."""
    if self.end_date:
        return
    reference_date = self.start_date or self.posting_date
    date_details = get_start_end_dates(self.payroll_frequency, reference_date)
    self.start_date = date_details.start_date
    self.end_date = date_details.end_date
def get_emp_and_leave_details(self):
    '''First time, load all the components from salary structure'''
    if self.employee:
        # Rebuild component tables from scratch.
        self.set("earnings", [])
        self.set("deductions", [])
        if not self.salary_slip_based_on_timesheet:
            self.get_date_details()
        self.validate_dates()
        joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
            ["date_of_joining", "relieving_date"])
        self.get_leave_details(joining_date, relieving_date)
        # Without an active structure assignment the slip stays empty
        # (check_sal_struct shows a user message in that case).
        struct = self.check_sal_struct(joining_date, relieving_date)
        if struct:
            self._salary_structure_doc = frappe.get_doc('Salary Structure', struct)
            self.salary_slip_based_on_timesheet = self._salary_structure_doc.salary_slip_based_on_timesheet or 0
            self.set_time_sheet()
            self.pull_sal_struct()
def set_time_sheet(self):
    """Attach all Submitted/Billed timesheets of the employee that start
    within this payroll period."""
    if self.salary_slip_based_on_timesheet:
        self.set("timesheets", [])
        timesheets = frappe.db.sql(""" select * from `tabTimesheet` where employee = %(employee)s and start_date BETWEEN %(start_date)s AND %(end_date)s and (status = 'Submitted' or
            status = 'Billed')""", {'employee': self.employee, 'start_date': self.start_date, 'end_date': self.end_date}, as_dict=1)
        for data in timesheets:
            self.append('timesheets', {
                'time_sheet': data.name,
                'working_hours': data.total_hours
            })
def check_sal_struct(self, joining_date, relieving_date):
    """Find the latest active Salary Structure assigned to the employee
    for this period and cache it on self.salary_structure.

    Returns the structure name, or None (with a user message) when no
    assignment matches. Note: relieving_date is accepted but not used
    in the query filter.
    """
    cond = """and sa.employee=%(employee)s and (sa.from_date <= %(start_date)s or
        sa.from_date <= %(end_date)s or sa.from_date <= %(joining_date)s)"""
    if self.payroll_frequency:
        # NOTE(review): payroll_frequency is %-interpolated into the SQL
        # instead of being parameterized; it comes from this doctype's own
        # field, but parameterizing would be safer — confirm before changing.
        cond += """and ss.payroll_frequency = '%(payroll_frequency)s'""" % {"payroll_frequency": self.payroll_frequency}
    st_name = frappe.db.sql("""
        select sa.salary_structure
        from `tabSalary Structure Assignment` sa join `tabSalary Structure` ss
        where sa.salary_structure=ss.name
            and sa.docstatus = 1 and ss.docstatus = 1 and ss.is_active ='Yes' %s
        order by sa.from_date desc
        limit 1
    """ %cond, {'employee': self.employee, 'start_date': self.start_date,
        'end_date': self.end_date, 'joining_date': joining_date})
    if st_name:
        self.salary_structure = st_name[0][0]
        return self.salary_structure
    else:
        self.salary_structure = None
        frappe.msgprint(_("No active or default Salary Structure found for employee {0} for the given dates")
            .format(self.employee), title=_('Salary Structure Missing'))
def pull_sal_struct(self):
    """Copy components from the assigned Salary Structure onto this slip
    (via make_salary_slip); for timesheet-based slips, first add the
    hourly-wage earning computed from total timesheet hours."""
    from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
    if self.salary_slip_based_on_timesheet:
        self.salary_structure = self._salary_structure_doc.name
        self.hour_rate = self._salary_structure_doc.hour_rate
        self.total_working_hours = sum([d.working_hours or 0.0 for d in self.timesheets]) or 0.0
        wages_amount = self.hour_rate * self.total_working_hours
        # `self` is passed explicitly as the target-doc argument.
        self.add_earning_for_hourly_wages(self, self._salary_structure_doc.salary_component, wages_amount)
    make_salary_slip(self._salary_structure_doc.name, self)
def get_leave_details(self, joining_date=None, relieving_date=None, lwp=None, for_preview=0):
    """Compute total working days, leave-without-pay (LWP) and payment
    days for the slip period.

    When for_preview is truthy, every day counts as payable and LWP is
    skipped entirely.
    """
    if not joining_date:
        joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
            ["date_of_joining", "relieving_date"])
    working_days = date_diff(self.end_date, self.start_date) + 1
    if for_preview:
        self.total_working_days = working_days
        self.payment_days = working_days
        return
    holidays = self.get_holidays_for_employee(self.start_date, self.end_date)
    actual_lwp = self.calculate_lwp(holidays, working_days)
    if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
        working_days -= len(holidays)
        if working_days < 0:
            frappe.throw(_("There are more holidays than working days this month."))
    if not lwp:
        lwp = actual_lwp
    elif lwp != actual_lwp:
        # User-entered LWP wins, but warn about the mismatch.
        frappe.msgprint(_("Leave Without Pay does not match with approved Leave Application records"))
    self.total_working_days = working_days
    self.leave_without_pay = lwp
    payment_days = flt(self.get_payment_days(joining_date, relieving_date)) - flt(lwp)
    # Clamp at zero; LWP can exceed payable days.
    self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, joining_date, relieving_date):
    """Count payable days in the period, clipped to the employee's
    joining and relieving dates.

    Returns None (implicit) when the employee joined after the period
    ends; the caller wraps the result in flt(), turning that into 0.
    """
    start_date = getdate(self.start_date)
    if joining_date:
        if getdate(self.start_date) <= joining_date <= getdate(self.end_date):
            start_date = joining_date
        elif joining_date > getdate(self.end_date):
            # Joined after this period: nothing payable.
            return
    end_date = getdate(self.end_date)
    if relieving_date:
        if getdate(self.start_date) <= relieving_date <= getdate(self.end_date):
            end_date = relieving_date
        elif relieving_date < getdate(self.start_date):
            frappe.throw(_("Employee relieved on {0} must be set as 'Left'")
                .format(relieving_date))
    payment_days = date_diff(end_date, start_date) + 1
    if not cint(frappe.db.get_value("HR Settings", None, "include_holidays_in_total_working_days")):
        holidays = self.get_holidays_for_employee(start_date, end_date)
        payment_days -= len(holidays)
    return payment_days
def get_holidays_for_employee(self, start_date, end_date):
    """Return the employee's holiday dates within [start_date, end_date],
    as a list of strings."""
    holiday_list = get_holiday_list_for_employee(self.employee)
    holiday_dates = frappe.db.sql_list('''select holiday_date from `tabHoliday`
        where
            parent=%(holiday_list)s
            and holiday_date >= %(start_date)s
            and holiday_date <= %(end_date)s''', {
            "holiday_list": holiday_list,
            "start_date": start_date,
            "end_date": end_date
        })
    return [cstr(day) for day in holiday_dates]
def calculate_lwp(self, holidays, working_days):
    """Total leave-without-pay days in the period, from approved Leave
    Applications of LWP leave types; half-day leaves count as 0.5."""
    lwp = 0
    # Holidays are inlined into the SQL as a quoted, comma-separated list;
    # they are date strings produced by get_holidays_for_employee.
    holidays = "','".join(holidays)
    for d in range(working_days):
        dt = add_days(cstr(getdate(self.start_date)), d)
        # Leave types that exclude holidays only match when `dt` is not a
        # holiday; leave already linked to a slip (t1.salary_slip) is skipped.
        leave = frappe.db.sql("""
            select t1.name, t1.half_day
            from `tabLeave Application` t1, `tabLeave Type` t2
            where t2.name = t1.leave_type
            and t2.is_lwp = 1
            and t1.docstatus = 1
            and t1.employee = %(employee)s
            and CASE WHEN t2.include_holiday != 1 THEN %(dt)s not in ('{0}') and %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
            WHEN t2.include_holiday THEN %(dt)s between from_date and to_date and ifnull(t1.salary_slip, '') = ''
            END
            """.format(holidays), {"employee": self.employee, "dt": dt})
        if leave:
            lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
    return lwp
def add_earning_for_hourly_wages(self, doc, salary_component, amount):
    """Set `amount` on doc's earnings row for `salary_component`,
    appending a new row when the component is not present yet.

    :param doc: target salary slip document (the caller passes `self`).
    :param salary_component: name of the Salary Component to update.
    :param amount: wage amount to set on the row.
    """
    for row in doc.earnings:
        if row.salary_component == salary_component:
            row.amount = amount
            return
    wages_row = {
        "salary_component": salary_component,
        "abbr": frappe.db.get_value("Salary Component", salary_component, "salary_component_abbr"),
        # Bug fix: use the `amount` argument here. The original recomputed
        # self.hour_rate * self.total_working_hours, silently ignoring the
        # parameter and relying on `doc is self`.
        "amount": amount,
        "default_amount": 0.0,
        "additional_amount": 0.0
    }
    doc.append('earnings', wages_row)
def calculate_net_pay(self):
    """Compute gross pay, total deductions, loan repayment and the
    rounded net pay for the slip."""
    if self.salary_structure:
        self.calculate_component_amounts()
    self.gross_pay = self.get_component_totals("earnings")
    self.total_deduction = self.get_component_totals("deductions")
    self.set_loan_repayment()
    total_outgo = flt(self.total_deduction) + flt(self.total_loan_repayment)
    self.net_pay = flt(self.gross_pay) - total_outgo
    self.rounded_total = rounded(self.net_pay)
def calculate_component_amounts(self):
    """Populate earnings/deductions from the salary structure, employee
    benefits, additional salaries and taxes, then pro-rate all rows by
    actual payment days."""
    if not getattr(self, '_salary_structure_doc', None):
        self._salary_structure_doc = frappe.get_doc('Salary Structure', self.salary_structure)
    payroll_period = get_payroll_period(self.start_date, self.end_date, self.company)
    self.add_structure_components()
    self.add_employee_benefits(payroll_period)
    self.add_additional_salary_components()
    self.add_tax_components(payroll_period)
    self.set_component_amounts_based_on_payment_days()
def add_structure_components(self):
    """Evaluate every structure earning/deduction row and copy the
    non-statistical ones with a non-zero amount onto the slip."""
    eval_context = self.get_data_for_eval()
    for table in ('earnings', 'deductions'):
        for struct_row in self._salary_structure_doc.get(table):
            row_amount = self.eval_condition_and_formula(struct_row, eval_context)
            if struct_row.statistical_component == 0 and row_amount:
                self.update_component_row(struct_row, row_amount, table)
def get_data_for_eval(self):
    '''Returns data for evaluating formula'''
    data = frappe._dict()
    # Layered context: structure assignment fields, then employee fields,
    # then the slip's own fields (later updates win on key collisions).
    data.update(frappe.get_doc("Salary Structure Assignment",
        {"employee": self.employee, "salary_structure": self.salary_structure}).as_dict())
    data.update(frappe.get_doc("Employee", self.employee).as_dict())
    data.update(self.as_dict())
    # set values for components
    salary_components = frappe.get_all("Salary Component", fields=["salary_component_abbr"])
    for sc in salary_components:
        # Every abbreviation defaults to 0 so formulas may reference
        # components that are not present on this slip.
        data.setdefault(sc.salary_component_abbr, 0)
    for key in ('earnings', 'deductions'):
        for d in self.get(key):
            data[d.abbr] = d.amount
    return data
def eval_condition_and_formula(self, d, data):
    """Evaluate a component row's condition and (optionally) formula
    against `data` using the whitelisted globals.

    Returns the resulting amount (also cached into data[d.abbr] when
    truthy), or None when the condition evaluates false.
    """
    try:
        condition = d.condition.strip() if d.condition else None
        if condition:
            if not frappe.safe_eval(condition, self.whitelisted_globals, data):
                return None
        amount = d.amount
        if d.amount_based_on_formula:
            formula = d.formula.strip() if d.formula else None
            if formula:
                amount = flt(frappe.safe_eval(formula, self.whitelisted_globals, data), d.precision("amount"))
        if amount:
            data[d.abbr] = amount
        return amount
    except NameError as err:
        frappe.throw(_("Name error: {0}".format(err)))
    except SyntaxError as err:
        frappe.throw(_("Syntax error in formula or condition: {0}".format(err)))
    except Exception as e:
        frappe.throw(_("Error in formula or condition: {0}".format(e)))
        # Unreachable in practice: frappe.throw itself raises.
        raise
def add_employee_benefits(self, payroll_period):
    """Add flexible-benefit earnings: pro-rated application amounts, or
    claimed amounts for pay-against-claim components; then settle any
    remainder in the period's final slip."""
    for struct_row in self._salary_structure_doc.get("earnings"):
        if struct_row.is_flexible_benefit == 1:
            if frappe.db.get_value("Salary Component", struct_row.salary_component, "pay_against_benefit_claim") != 1:
                benefit_component_amount = get_benefit_component_amount(self.employee, self.start_date, self.end_date,
                    struct_row.salary_component, self._salary_structure_doc, self.payroll_frequency, payroll_period)
                if benefit_component_amount:
                    self.update_component_row(struct_row, benefit_component_amount, "earnings")
            else:
                benefit_claim_amount = get_benefit_claim_amount(self.employee, self.start_date, self.end_date, struct_row.salary_component)
                if benefit_claim_amount:
                    self.update_component_row(struct_row, benefit_claim_amount, "earnings")
    self.adjust_benefits_in_last_payroll_period(payroll_period)
def adjust_benefits_in_last_payroll_period(self, payroll_period):
    """In the payroll period's final slip, pay out benefits that remain
    unpaid so the period entitlement is fully settled."""
    if payroll_period:
        # Only applies when this slip reaches (or passes) the period end.
        if (getdate(payroll_period.end_date) <= getdate(self.end_date)):
            last_benefits = get_last_payroll_period_benefits(self.employee, self.start_date, self.end_date,
                payroll_period, self._salary_structure_doc)
            if last_benefits:
                for last_benefit in last_benefits:
                    last_benefit = frappe._dict(last_benefit)
                    amount = last_benefit.amount
                    self.update_component_row(frappe._dict(last_benefit.struct_row), amount, "earnings")
def add_additional_salary_components(self):
    """Merge Additional Salary records for the period into the slip's
    earnings or deductions tables."""
    for extra in (get_additional_salary_component(self.employee, self.start_date, self.end_date) or []):
        target = "earnings" if extra.type == "Earning" else "deductions"
        self.update_component_row(
            frappe._dict(extra.struct_row), extra.amount, target, overwrite=extra.overwrite)
def add_tax_components(self, payroll_period):
    """Append deduction rows for variable-tax components computed from
    taxable salary."""
    # Calculate variable_based_on_taxable_salary after all components updated in salary slip
    tax_components, other_deduction_components = [], []
    for d in self._salary_structure_doc.get("deductions"):
        if d.variable_based_on_taxable_salary == 1 and not d.formula and not flt(d.amount):
            tax_components.append(d.salary_component)
        else:
            other_deduction_components.append(d.salary_component)
    if not tax_components:
        # Fall back to any tax-variable component not already used as a
        # formula/fixed deduction in this structure.
        tax_components = [d.name for d in frappe.get_all("Salary Component", filters={"variable_based_on_taxable_salary": 1})
            if d.name not in other_deduction_components]
    for d in tax_components:
        tax_amount = self.calculate_variable_based_on_taxable_salary(d, payroll_period)
        tax_row = self.get_salary_slip_row(d)
        self.update_component_row(tax_row, tax_amount, "deductions")
def update_component_row(self, struct_row, amount, key, overwrite=1):
    """Insert or update the row for struct_row's component in the given
    child table ('earnings' or 'deductions').

    For additional components, `overwrite` decides whether `amount`
    replaces the structure-derived amount or is stacked on top of it.
    New rows are only created for non-zero amounts.
    """
    component_row = None
    for d in self.get(key):
        if d.salary_component == struct_row.salary_component:
            component_row = d
    if not component_row:
        if amount:
            self.append(key, {
                'amount': amount,
                'default_amount': amount if not struct_row.get("is_additional_component") else 0,
                'depends_on_payment_days' : struct_row.depends_on_payment_days,
                'salary_component' : struct_row.salary_component,
                'abbr' : struct_row.abbr,
                'do_not_include_in_total' : struct_row.do_not_include_in_total,
                'is_tax_applicable': struct_row.is_tax_applicable,
                'is_flexible_benefit': struct_row.is_flexible_benefit,
                'variable_based_on_taxable_salary': struct_row.variable_based_on_taxable_salary,
                'deduct_full_tax_on_selected_payroll_date': struct_row.deduct_full_tax_on_selected_payroll_date,
                'additional_amount': amount if struct_row.get("is_additional_component") else 0
            })
    else:
        if struct_row.get("is_additional_component"):
            if overwrite:
                # Track only the delta over the structure-derived default.
                component_row.additional_amount = amount - component_row.get("default_amount", 0)
            else:
                component_row.additional_amount = amount
            if not overwrite and component_row.default_amount:
                amount += component_row.default_amount
        else:
            component_row.default_amount = amount
        component_row.amount = amount
        component_row.deduct_full_tax_on_selected_payroll_date = struct_row.deduct_full_tax_on_selected_payroll_date
def calculate_variable_based_on_taxable_salary(self, tax_component, payroll_period):
    """Compute this period's amount for a variable tax component.

    Returns None (with a user message) when no payroll period covers
    the slip dates.
    """
    if not payroll_period:
        frappe.msgprint(_("Start and end dates not in a valid Payroll Period, cannot calculate {0}.")
            .format(tax_component))
        return
    # Deduct taxes forcefully for unsubmitted tax exemption proof and unclaimed benefits in the last period
    if payroll_period.end_date <= getdate(self.end_date):
        self.deduct_tax_for_unsubmitted_tax_exemption_proof = 1
        self.deduct_tax_for_unclaimed_employee_benefits = 1
    return self.calculate_variable_tax(payroll_period, tax_component)
def calculate_variable_tax(self, payroll_period, tax_component):
    """Project annual taxable income, apply the period's tax slabs, and
    spread the remaining tax over the remaining sub-periods; additional
    earnings flagged for full tax are taxed entirely in this slip."""
    # get remaining numbers of sub-period (period for which one salary is processed)
    remaining_sub_periods = get_period_factor(self.employee,
        self.start_date, self.end_date, self.payroll_frequency, payroll_period)[1]
    # get taxable_earnings, paid_taxes for previous period
    previous_taxable_earnings = self.get_taxable_earnings_for_prev_period(payroll_period.start_date, self.start_date)
    previous_total_paid_taxes = self.get_tax_paid_in_period(payroll_period.start_date, self.start_date, tax_component)
    # get taxable_earnings for current period (all days)
    current_taxable_earnings = self.get_taxable_earnings()
    future_structured_taxable_earnings = current_taxable_earnings.taxable_earnings * (math.ceil(remaining_sub_periods) - 1)
    # get taxable_earnings, addition_earnings for current actual payment days
    current_taxable_earnings_for_payment_days = self.get_taxable_earnings(based_on_payment_days=1)
    current_structured_taxable_earnings = current_taxable_earnings_for_payment_days.taxable_earnings
    current_additional_earnings = current_taxable_earnings_for_payment_days.additional_income
    current_additional_earnings_with_full_tax = current_taxable_earnings_for_payment_days.additional_income_with_full_tax
    # Get taxable unclaimed benefits
    unclaimed_taxable_benefits = 0
    if self.deduct_tax_for_unclaimed_employee_benefits:
        unclaimed_taxable_benefits = self.calculate_unclaimed_taxable_benefits(payroll_period)
        unclaimed_taxable_benefits += current_taxable_earnings_for_payment_days.flexi_benefits
    # Total exemption amount based on tax exemption declaration
    total_exemption_amount, other_incomes = self.get_total_exemption_amount_and_other_incomes(payroll_period)
    # Total taxable earnings including additional and other incomes
    total_taxable_earnings = previous_taxable_earnings + current_structured_taxable_earnings + future_structured_taxable_earnings \
        + current_additional_earnings + other_incomes + unclaimed_taxable_benefits - total_exemption_amount
    # Total taxable earnings without additional earnings with full tax
    total_taxable_earnings_without_full_tax_addl_components = total_taxable_earnings - current_additional_earnings_with_full_tax
    # Structured tax amount
    total_structured_tax_amount = self.calculate_tax_by_tax_slab(payroll_period, total_taxable_earnings_without_full_tax_addl_components)
    current_structured_tax_amount = (total_structured_tax_amount - previous_total_paid_taxes) / remaining_sub_periods
    # Total taxable earnings with additional earnings with full tax
    full_tax_on_additional_earnings = 0.0
    if current_additional_earnings_with_full_tax:
        total_tax_amount = self.calculate_tax_by_tax_slab(payroll_period, total_taxable_earnings)
        full_tax_on_additional_earnings = total_tax_amount - total_structured_tax_amount
    current_tax_amount = current_structured_tax_amount + full_tax_on_additional_earnings
    # Never deduct negative tax (e.g. after over-payment in prior periods).
    if flt(current_tax_amount) < 0:
        current_tax_amount = 0
    return current_tax_amount
def get_taxable_earnings_for_prev_period(self, start_date, end_date):
    """Sum taxable (non-flexible-benefit) earnings of submitted slips
    whose period falls within [start_date, end_date]."""
    taxable_earnings = frappe.db.sql("""
        select sum(sd.amount)
        from
            `tabSalary Detail` sd join `tabSalary Slip` ss on sd.parent=ss.name
        where
            sd.parentfield='earnings'
            and sd.is_tax_applicable=1
            and is_flexible_benefit=0
            and ss.docstatus=1
            and ss.employee=%(employee)s
            and ss.start_date between %(from_date)s and %(to_date)s
            and ss.end_date between %(from_date)s and %(to_date)s
        """, {
            "employee": self.employee,
            "from_date": start_date,
            "to_date": end_date
        })
    # sum() yields NULL when no rows match; normalize to 0.
    return flt(taxable_earnings[0][0]) if taxable_earnings else 0
def get_tax_paid_in_period(self, start_date, end_date, tax_component):
    """Sum the given variable tax component already deducted on submitted
    slips within [start_date, end_date]."""
    # find total_tax_paid, tax paid for benefit, additional_salary
    total_tax_paid = flt(frappe.db.sql("""
        select
            sum(sd.amount)
        from
            `tabSalary Detail` sd join `tabSalary Slip` ss on sd.parent=ss.name
        where
            sd.parentfield='deductions'
            and sd.salary_component=%(salary_component)s
            and sd.variable_based_on_taxable_salary=1
            and ss.docstatus=1
            and ss.employee=%(employee)s
            and ss.start_date between %(from_date)s and %(to_date)s
            and ss.end_date between %(from_date)s and %(to_date)s
        """, {
            "salary_component": tax_component,
            "employee": self.employee,
            "from_date": start_date,
            "to_date": end_date
        })[0][0])
    # flt(None) -> 0 when no rows matched.
    return total_tax_paid
def get_taxable_earnings(self, based_on_payment_days=0):
    """Split this slip's earnings into taxable buckets.

    Returns a frappe._dict with keys: taxable_earnings (structured,
    tax-applicable pay), additional_income, additional_income_with_full_tax
    (subset flagged deduct_full_tax_on_selected_payroll_date) and
    flexi_benefits. When based_on_payment_days is truthy, amounts are
    pro-rated to actual payment days first.
    """
    joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
        ["date_of_joining", "relieving_date"])
    if not relieving_date:
        relieving_date = getdate(self.end_date)
    if not joining_date:
        frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
    taxable_earnings = 0
    additional_income = 0
    additional_income_with_full_tax = 0
    flexi_benefits = 0
    for earning in self.earnings:
        if based_on_payment_days:
            amount, additional_amount = self.get_amount_based_on_payment_days(earning, joining_date, relieving_date)
        else:
            amount, additional_amount = earning.amount, earning.additional_amount
        if earning.is_tax_applicable:
            if additional_amount:
                # Additional portion is tracked separately from structured pay.
                taxable_earnings += (amount - additional_amount)
                additional_income += additional_amount
                if earning.deduct_full_tax_on_selected_payroll_date:
                    additional_income_with_full_tax += additional_amount
                continue
            if earning.is_flexible_benefit:
                flexi_benefits += amount
            else:
                taxable_earnings += amount
    return frappe._dict({
        "taxable_earnings": taxable_earnings,
        "additional_income": additional_income,
        "additional_income_with_full_tax": additional_income_with_full_tax,
        "flexi_benefits": flexi_benefits
    })
def get_amount_based_on_payment_days(self, row, joining_date, relieving_date):
    """Return (amount, additional_amount) for a component row, pro-rated
    by payment days when the row depends on them."""
    amount, additional_amount = row.amount, row.additional_amount
    # Pro-rate for non-timesheet slips, or for timesheet slips whose
    # period extends beyond the employee's joining/relieving dates.
    if (self.salary_structure and
        cint(row.depends_on_payment_days) and cint(self.total_working_days) and
        (not self.salary_slip_based_on_timesheet or
            getdate(self.start_date) < joining_date or
            getdate(self.end_date) > relieving_date
        )):
        additional_amount = flt((flt(row.additional_amount) * flt(self.payment_days)
            / cint(self.total_working_days)), row.precision("additional_amount"))
        amount = flt((flt(row.default_amount) * flt(self.payment_days)
            / cint(self.total_working_days)), row.precision("amount")) + additional_amount
    elif not self.payment_days and not self.salary_slip_based_on_timesheet and cint(row.depends_on_payment_days):
        amount, additional_amount = 0, 0
    elif not row.amount:
        amount = flt(row.default_amount) + flt(row.additional_amount)
    # apply rounding
    if frappe.get_cached_value("Salary Component", row.salary_component, "round_to_the_nearest_integer"):
        amount, additional_amount = rounded(amount), rounded(additional_amount)
    return amount, additional_amount
def calculate_unclaimed_taxable_benefits(self, payroll_period):
    """Return taxable flexible benefits paid out in the payroll period
    (up to this slip's start) minus the amounts claimed against them."""
    # get total sum of benefits paid
    total_benefits_paid = flt(frappe.db.sql("""
        select sum(sd.amount)
        from `tabSalary Detail` sd join `tabSalary Slip` ss on sd.parent=ss.name
        where
            sd.parentfield='earnings'
            and sd.is_tax_applicable=1
            and is_flexible_benefit=1
            and ss.docstatus=1
            and ss.employee=%(employee)s
            and ss.start_date between %(start_date)s and %(end_date)s
            and ss.end_date between %(start_date)s and %(end_date)s
        """, {
            "employee": self.employee,
            "start_date": payroll_period.start_date,
            "end_date": self.start_date
        })[0][0])
    # get total benefits claimed
    total_benefits_claimed = flt(frappe.db.sql("""
        select sum(claimed_amount)
        from `tabEmployee Benefit Claim`
        where
            docstatus=1
            and employee=%s
            and claim_date between %s and %s
        """, (self.employee, payroll_period.start_date, self.end_date))[0][0])
    # flt() maps NULL sums to 0; result can be negative if claims exceed payouts.
    return total_benefits_paid - total_benefits_claimed
def get_total_exemption_amount_and_other_incomes(self, payroll_period):
    """Fetch the employee's tax exemption amount and other-income figure
    for the payroll period, preferring submitted proof over the
    declaration when unsubmitted-proof deduction is forced."""
    if self.deduct_tax_for_unsubmitted_tax_exemption_proof:
        doctype = "Employee Tax Exemption Proof Submission"
        fieldnames = ["exemption_amount", "income_from_other_sources"]
    else:
        doctype = "Employee Tax Exemption Declaration"
        fieldnames = ["total_exemption_amount", "income_from_other_sources"]
    record = frappe.db.get_value(doctype,
        {"employee": self.employee, "payroll_period": payroll_period.name, "docstatus": 1},
        fieldnames)
    if record:
        return record
    return 0, 0
def calculate_tax_by_tax_slab(self, payroll_period, annual_taxable_earning):
    """Apply the payroll period's progressive tax slabs to the annual
    taxable earning (after the standard exemption) and return the tax.

    Slab rows with an empty to_amount are treated as the open-ended top
    slab; rows with a condition are skipped when it evaluates false.
    """
    payroll_period_obj = frappe.get_doc("Payroll Period", payroll_period)
    annual_taxable_earning -= flt(payroll_period_obj.standard_tax_exemption_amount)
    data = self.get_data_for_eval()
    data.update({"annual_taxable_earning": annual_taxable_earning})
    taxable_amount = 0
    for slab in payroll_period_obj.taxable_salary_slabs:
        if slab.condition and not self.eval_tax_slab_condition(slab.condition, data):
            continue
        if not slab.to_amount and annual_taxable_earning > slab.from_amount:
            # Open-ended top slab: tax everything above from_amount.
            taxable_amount += (annual_taxable_earning - slab.from_amount) * slab.percent_deduction *.01
            continue
        if slab.from_amount < annual_taxable_earning <= slab.to_amount:
            # Bug fix: an earning exactly equal to to_amount previously
            # matched neither branch (`< to` and `> to` both false) and
            # escaped this slab entirely.
            taxable_amount += (annual_taxable_earning - slab.from_amount) * slab.percent_deduction *.01
        elif annual_taxable_earning > slab.to_amount:
            taxable_amount += (slab.to_amount - slab.from_amount) * slab.percent_deduction * .01
    return taxable_amount
def eval_tax_slab_condition(self, condition, data):
    """Evaluate a slab condition string against `data` with the
    whitelisted globals; returns the eval result, or None (falsy) when
    the stripped condition is empty."""
    try:
        condition = condition.strip()
        if condition:
            return frappe.safe_eval(condition, self.whitelisted_globals, data)
    except NameError as err:
        frappe.throw(_("Name error: {0}".format(err)))
    except SyntaxError as err:
        frappe.throw(_("Syntax error in condition: {0}".format(err)))
    except Exception as e:
        frappe.throw(_("Error in formula or condition: {0}".format(e)))
        # Unreachable in practice: frappe.throw itself raises.
        raise
def get_salary_slip_row(self, salary_component):
    """Build a struct_row-shaped dict for update_component_row from a
    Salary Component document."""
    component = frappe.get_doc("Salary Component", salary_component)
    # Data for update_component_row
    return frappe._dict({
        'depends_on_payment_days': component.depends_on_payment_days,
        'salary_component': component.name,
        'abbr': component.salary_component_abbr,
        'do_not_include_in_total': component.do_not_include_in_total,
        'is_tax_applicable': component.is_tax_applicable,
        'is_flexible_benefit': component.is_flexible_benefit,
        'variable_based_on_taxable_salary': component.variable_based_on_taxable_salary,
    })
def get_component_totals(self, component_type):
    """Sum row amounts for 'earnings' or 'deductions', rounding each row
    amount in place; rows flagged do_not_include_in_total are skipped."""
    total = 0.0
    for row in self.get(component_type):
        if row.do_not_include_in_total:
            continue
        row.amount = flt(row.amount, row.precision("amount"))
        total += row.amount
    return total
def set_component_amounts_based_on_payment_days(self):
    """Pro-rate every earning and deduction row by the actual payment days."""
    joining_date, relieving_date = frappe.get_cached_value("Employee", self.employee,
        ["date_of_joining", "relieving_date"])
    if not relieving_date:
        relieving_date = getdate(self.end_date)
    if not joining_date:
        frappe.throw(_("Please set the Date Of Joining for employee {0}").format(frappe.bold(self.employee_name)))
    for component_type in ("earnings", "deductions"):
        for d in self.get(component_type):
            # [0] keeps only the pro-rated amount, dropping additional_amount.
            d.amount = self.get_amount_based_on_payment_days(d, joining_date, relieving_date)[0]
def set_loan_repayment(self):
    """Rebuild the loans table from repayment-schedule rows due in this
    period and accumulate the repayment totals."""
    self.set('loans', [])
    self.total_loan_repayment = 0
    self.total_interest_amount = 0
    self.total_principal_amount = 0
    for schedule_row in self.get_loan_details():
        loan_row = {
            'loan': schedule_row.name,
            'total_payment': schedule_row.total_payment,
            'interest_amount': schedule_row.interest_amount,
            'principal_amount': schedule_row.principal_amount,
            'loan_account': schedule_row.loan_account,
            'interest_income_account': schedule_row.interest_income_account
        }
        self.append('loans', loan_row)
        self.total_loan_repayment += schedule_row.total_payment
        self.total_interest_amount += schedule_row.interest_amount
        self.total_principal_amount += schedule_row.principal_amount
def get_loan_details(self):
    """Return repayment-schedule rows falling due in this period for the
    employee's submitted repay-from-salary loans (empty list if none)."""
    return frappe.db.sql("""select rps.principal_amount, rps.interest_amount, l.name,
            rps.total_payment, l.loan_account, l.interest_income_account
        from
            `tabRepayment Schedule` as rps, `tabLoan` as l
        where
            l.name = rps.parent and rps.payment_date between %s and %s and
            l.repay_from_salary = 1 and l.docstatus = 1 and l.applicant = %s""",
        (self.start_date, self.end_date, self.employee), as_dict=True) or []
def update_salary_slip_in_additional_salary(self):
    """Link this slip (on submit) or clear the link (otherwise) on all
    matching submitted Additional Salary records for the period."""
    linked_name = self.name if self.docstatus == 1 else None
    frappe.db.sql("""
        update `tabAdditional Salary` set salary_slip=%s
        where employee=%s and payroll_date between %s and %s and docstatus=1
    """, (linked_name, self.employee, self.start_date, self.end_date))
def email_salary_slip(self):
    """Email the printed slip to the employee's preferred address,
    queueing in the background except under tests."""
    receiver = frappe.db.get_value("Employee", self.employee, "prefered_email")
    if receiver:
        email_args = {
            "recipients": [receiver],
            "message": _("Please see attachment"),
            "subject": 'Salary Slip - from {0} to {1}'.format(self.start_date, self.end_date),
            "attachments": [frappe.attach_print(self.doctype, self.name, file_name=self.name)],
            "reference_doctype": self.doctype,
            "reference_name": self.name
        }
        if not frappe.flags.in_test:
            enqueue(method=frappe.sendmail, queue='short', timeout=300, is_async=True, **email_args)
        else:
            # Send synchronously in tests so the outbox can be asserted.
            frappe.sendmail(**email_args)
    else:
        msgprint(_("{0}: Employee email not found, hence email not sent").format(self.employee_name))
def update_status(self, salary_slip=None):
    """Stamp this slip (or clear the link when salary_slip is None) on
    every attached timesheet and refresh the timesheet status."""
    for data in self.timesheets:
        if data.time_sheet:
            timesheet = frappe.get_doc('Timesheet', data.time_sheet)
            timesheet.salary_slip = salary_slip
            # Timesheets are submitted; allow the post-submit field change.
            timesheet.flags.ignore_validate_update_after_submit = True
            timesheet.set_status()
            timesheet.save()
def set_status(self, status=None):
    '''Get and update status'''
    # Fall back to the status derived from docstatus when none is given.
    self.db_set("status", status or self.get_status())
def process_salary_structure(self, for_preview=0):
    '''Calculate salary after salary structure details have been updated'''
    if not self.salary_slip_based_on_timesheet:
        self.get_date_details()
    self.pull_emp_details()
    self.get_leave_details(for_preview=for_preview)
    self.calculate_net_pay()
def pull_emp_details(self):
    """Copy the employee's bank details onto the slip."""
    emp = frappe.db.get_value("Employee", self.employee,
        ["bank_name", "bank_ac_no"], as_dict=1)
    if not emp:
        return
    self.bank_name = emp.bank_name
    self.bank_account_no = emp.bank_ac_no
def process_salary_based_on_leave(self, lwp=0):
    """Recompute leave figures with the given LWP count and refresh net pay."""
    self.get_leave_details(lwp=lwp)
    self.calculate_net_pay()
def unlink_ref_doc_from_salary_slip(ref_no):
    """Clear the journal_entry link on every non-cancelled Salary Slip
    that references the given journal entry."""
    linked_ss = frappe.db.sql_list("""select name from `tabSalary Slip`
        where journal_entry=%s and docstatus < 2""", ref_no)
    for ss in linked_ss:
        ss_doc = frappe.get_doc("Salary Slip", ss)
        frappe.db.set_value("Salary Slip", ss_doc.name, "journal_entry", "")
| shubhamgupta123/erpnext | erpnext/hr/doctype/salary_slip/salary_slip.py | Python | gpl-3.0 | 34,301 |
#
# This file is part of CAVIAR.
#
# CAVIAR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CAVIAR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CAVIAR. If not, see <http://www.gnu.org/licenses/>.
#
"""
Load balancer module.
"""
class LoadBalancer:

	"""
	Load balancer.

	Drives a web-tier load balancer host over SSH, keeping it in sync
	with the DAS: instances can be registered and removed, and domain
	certificates installed during preparation.
	"""

	def __init__(self, ssh_session_fact, das_machine, lb_machine,
			master_password):
		open_session = ssh_session_fact.session
		self.__das_machine = das_machine
		self.__lb_machine = lb_machine
		self.__master_password = master_password
		# DAS session first, then the load balancer session.
		self.__das_ssh_session = open_session(
			das_machine.appserver_user,
			das_machine.host
		)
		self.__lb_ssh_session = open_session(
			lb_machine.web_user,
			lb_machine.host
		)

	# TODO Test coverage...
	@property
	def host(self):
		"""Host name of the load balancer machine."""
		return self.__lb_machine.host

	# TODO Test coverage...
	def prepare(self, domain_name):
		"""Check the LB host is reachable, then install the domain's
		certificates on it from the DAS."""
		self.__drain(self.__lb_ssh_session, self.__lb_machine.ping_cmd())
		install_cmd = self.__das_machine.install_certificates_cmd(
			domain_name,
			self.__master_password,
			self.__lb_machine.host
		)
		self.__drain(self.__das_ssh_session, install_cmd)

	def add_instance(self, name, host, port):
		"""
		Add an instance with the given name, host and port to the load balancer.

		:param str name:
			Instance name.
		:param str host:
			Instance host.
		:param str port:
			Instance port.
		"""
		self.__drain(
			self.__lb_ssh_session,
			self.__lb_machine.add_instance_cmd(name, host, port)
		)

	def remove_instance(self, name):
		"""
		Remove the instance with the given name from the load balancer.

		:param str name:
			Instance name.
		"""
		self.__drain(
			self.__lb_ssh_session,
			self.__lb_machine.remove_instance_cmd(name)
		)

	@staticmethod
	def __drain(session, cmd):
		# Consume command output with any() so the remote command runs
		# to completion (execute returns a lazy iterable).
		any(session.execute(cmd))
| miquelo/caviar | packages/caviar/engine/lb.py | Python | gpl-3.0 | 2,131 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot import config
from buildbot import interfaces
from buildbot.changes import changes
from buildbot.process.properties import Properties
from buildbot.util.service import ClusteredService
from buildbot.util.state import StateMixin
from twisted.internet import defer
from twisted.python import failure
from twisted.python import log
from zope.interface import implements
class BaseScheduler(ClusteredService, StateMixin):

    """Common base class for schedulers.

    Validates builder names, carries scheduler properties and codebase
    configuration, consumes changes from the data API and creates
    buildsets from them.
    """

    implements(interfaces.IScheduler)

    DEFAULT_CODEBASES = {'': {}}

    compare_attrs = ClusteredService.compare_attrs + \
        ('builderNames', 'properties', 'codebases')

    def __init__(self, name, builderNames, properties,
                 codebases=DEFAULT_CODEBASES):
        """
        @param name: name of this scheduler
        @param builderNames: list (or tuple) of builder names triggered by
            this scheduler
        @param properties: properties attached to every buildset
        @param codebases: codebase configuration, a dict of dicts
        """
        ClusteredService.__init__(self, name)

        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, basestring):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names.")

        self.builderNames = builderNames

        self.properties = Properties()
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")
        self.objectid = None

        self.master = None

        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp with or without
        # changes
        if codebases is not None:
            if not isinstance(codebases, dict):
                config.error("Codebases must be a dict of dicts")
            for codebase, codebase_attrs in codebases.iteritems():
                if not isinstance(codebase_attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                if (codebases != BaseScheduler.DEFAULT_CODEBASES and
                        'repository' not in codebase_attrs):
                    config.error(
                        "The key 'repository' is mandatory in codebases")
        else:
            config.error("Codebases cannot be None")

        self.codebases = codebases

        # internal variables
        self._change_consumer = None
        self._change_consumption_lock = defer.DeferredLock()

    # activity handling

    def activate(self):
        """Called when this scheduler becomes the active one."""
        return defer.succeed(None)

    def deactivate(self):
        """Called on deactivation; stops change consumption."""
        return defer.maybeDeferred(self._stopConsumingChanges)

    # service handling

    def _getServiceId(self):
        return self.master.data.updates.findSchedulerId(self.name)

    def _claimService(self):
        return self.master.data.updates.trySetSchedulerMaster(self.serviceid,
                                                              self.master.masterid)

    def _unclaimService(self):
        return self.master.data.updates.trySetSchedulerMaster(self.serviceid,
                                                              None)

    # status queries

    # deprecated: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        """Return the builder names this scheduler triggers."""
        return self.builderNames

    def getPendingBuildTimes(self):
        """Deprecated; always returns an empty list."""
        return []

    # change handling

    @defer.inlineCallbacks
    def startConsumingChanges(self, fileIsImportant=None, change_filter=None,
                              onlyImportant=False):
        """Subscribe to change messages from the data API.

        @param fileIsImportant: optional callable deciding whether a change
            is important
        @param change_filter: optional ChangeFilter applied to each change
        @param onlyImportant: if true, unimportant changes are dropped
        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with the data API
        assert not self._change_consumer
        self._change_consumer = yield self.master.data.startConsuming(
            lambda k, m: self._changeCallback(k, m, fileIsImportant,
                                              change_filter, onlyImportant),
            {},
            ('changes',))

    @defer.inlineCallbacks
    def _changeCallback(self, key, msg, fileIsImportant, change_filter,
                        onlyImportant):

        # ignore changes delivered while we're not running
        if not self._change_consumer:
            return

        # get a change object, since the API requires it
        chdict = yield self.master.db.changes.getChange(msg['changeid'])
        change = yield changes.Change.fromChdict(self.master, chdict)

        # filter it
        if change_filter and not change_filter.filter_change(change):
            return
        if change.codebase not in self.codebases:
            log.msg(format='change contains codebase %(codebase)s that is '
                    'not processed by scheduler %(name)s',
                    codebase=change.codebase, name=self.name)
            return
        if fileIsImportant:
            try:
                important = fileIsImportant(change)
                if not important and onlyImportant:
                    return
            # BUG FIX: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt; narrow to Exception while
            # keeping the log-and-drop behavior.
            except Exception:
                log.err(failure.Failure(),
                        'in fileIsImportant check for %s' % change)
                return
        else:
            important = True

        # use change_consumption_lock to ensure the service does not stop
        # while this change is being processed
        d = self._change_consumption_lock.run(
            self.gotChange, change, important)
        d.addErrback(log.err, 'while processing change')

    def _stopConsumingChanges(self):
        # (note: called automatically in deactivate)

        # acquire the lock change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        def stop():
            if self._change_consumer:
                self._change_consumer.stopConsuming()
                self._change_consumer = None
        return self._change_consumption_lock.run(stop)

    def gotChange(self, change, important):
        """Subclass hook invoked for every accepted change."""
        raise NotImplementedError

    # starting builds

    def addBuildsetForSourceStampsWithDefaults(self, reason, sourcestamps,
                                               waited_for=False, properties=None,
                                               builderNames=None,
                                               **kw):
        """Create a buildset, filling in defaults from the configured
        codebases for any codebase not present in ``sourcestamps``."""
        if sourcestamps is None:
            sourcestamps = []

        # convert sourcestamps to a dictionary keyed by codebase
        stampsByCodebase = {}
        for ss in sourcestamps:
            cb = ss['codebase']
            if cb in stampsByCodebase:
                raise RuntimeError("multiple sourcestamps with same codebase")
            stampsByCodebase[cb] = ss

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        stampsWithDefaults = []
        for codebase in stampsByCodebase:
            ss = self.codebases.get(codebase, {}).copy()
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(stampsByCodebase[codebase])
            stampsWithDefaults.append(ss)

        return self.addBuildsetForSourceStamps(sourcestamps=stampsWithDefaults,
                                               reason=reason, waited_for=waited_for, properties=properties,
                                               builderNames=builderNames,
                                               **kw)

    def getCodebaseDict(self, codebase):
        # Hook for subclasses to change codebase parameters when a codebase does
        # not have a change associated with it.
        return self.codebases[codebase]

    @defer.inlineCallbacks
    def addBuildsetForChanges(self, waited_for=False, reason='',
                              external_idstring=None, changeids=None,
                              builderNames=None, properties=None,
                              **kw):
        """Create one buildset from the given change ids, building a
        default sourcestamp for every configured codebase without changes."""
        # BUG FIX: the default was a shared mutable list ([]); use None as
        # the sentinel instead. Passing an explicit list works as before.
        if changeids is None:
            changeids = []

        changesByCodebase = {}

        def get_last_change_for_codebase(codebase):
            return max(changesByCodebase[codebase], key=lambda change: change["changeid"])

        # Changes are retrieved from database and grouped by their codebase
        for changeid in changeids:
            chdict = yield self.master.db.changes.getChange(changeid)
            changesByCodebase.setdefault(chdict["codebase"], []).append(chdict)

        sourcestamps = []
        for codebase in self.codebases:
            if codebase not in changesByCodebase:
                # codebase has no changes
                # create a sourcestamp that has no changes
                cb = self.getCodebaseDict(codebase)

                ss = {
                    'codebase': codebase,
                    'repository': cb['repository'],
                    'branch': cb.get('branch', None),
                    'revision': cb.get('revision', None),
                    'project': '',
                }
            else:
                lastChange = get_last_change_for_codebase(codebase)
                ss = lastChange['sourcestampid']
            sourcestamps.append(ss)

        # add one buildset, using the calculated sourcestamps
        bsid, brids = yield self.addBuildsetForSourceStamps(
            waited_for, sourcestamps=sourcestamps, reason=reason,
            external_idstring=external_idstring, builderNames=builderNames,
            properties=properties, **kw)

        defer.returnValue((bsid, brids))

    def addBuildsetForSourceStamps(self, waited_for=False, sourcestamps=None,
                                   reason='', external_idstring=None, properties=None,
                                   builderNames=None, **kw):
        """Create a buildset for the given (complete) sourcestamps."""
        # BUG FIX: mutable default argument replaced by a None sentinel.
        if sourcestamps is None:
            sourcestamps = []

        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        return self.master.data.updates.addBuildset(
            scheduler=self.name, sourcestamps=sourcestamps, reason=reason,
            waited_for=waited_for, properties=properties_dict, builderNames=builderNames,
            external_idstring=external_idstring, **kw)
| zozo123/buildbot | master/buildbot/schedulers/base.py | Python | gpl-3.0 | 10,933 |
# Minimal Flask front-end over an Elasticsearch movie index.
from flask import Flask, jsonify, request, render_template, make_response
from datetime import datetime
from elasticsearch import Elasticsearch

# Module-level singletons: the ES client (default http://localhost:9200)
# and the WSGI application object.
es = Elasticsearch()

app = Flask(__name__)
@app.route('/')
def hello_world():
    """Serve the search page."""
    return render_template('index.html')
@app.route('/buscar', methods=["POST"])
def buscar():
    """Handle a search form POST and return the matches as JSON."""
    term = request.form['buscado']
    return jsonify({'resultado': realizar_busqueda_2(term)})
def realizar_busqueda_2(buscado):
    """Fuzzy title search; return the highlight block of every hit."""
    title_match = {
        "query": buscado,
        "fuzziness": "AUTO",
        "boost": 2.0,
        "prefix_length": 1,
        "max_expansions": 100,
        # "minimum_should_match": 10,
        "operator": "and"
    }
    body = {
        "query": {
            "match": {"Title": title_match}
        },
        "highlight": {
            "fields": {
                "Title": {},
                "Plot": {"fragment_size": 300, "number_of_fragments": 3}
            },
            # Allows highlighting on fields that were not part of the
            # query, such as Plot in this example.
            "require_field_match": False
        }
    }
    res = es.search(index="prueba-index", body=body)
    print("Got %d Hits:" % res['hits']['total'])
    # With a single hit the list has one element; with more hits, one
    # highlight block per hit.
    return [hit['highlight'] for hit in res['hits']['hits']]
def realizar_busqueda(buscado):
    """Fuzzy director search; return the titles of the matching movies."""
    body = {
        "query": {
            "match": {
                "Director": {
                    "query": buscado,
                    "fuzziness": "AUTO",
                    "operator": "and"
                }
            }
        },
        "highlight": {
            "fields": {
                "Title": {},
                "Plot": {}
            }
        }
    }
    res = es.search(index="prueba-index", body=body)
    print("Got %d Hits:" % res['hits']['total'])
    return ["%(Title)s" % hit["_source"] for hit in res['hits']['hits']]
def realizar_busqueda_3(buscado):
    """Prefix (regexp) title search; return the highlight block per hit."""
    body = {
        "query": {
            "regexp": {"Title": buscado + ".*"}
        },
        "highlight": {
            "fields": {
                "Title": {},
                "Plot": {"fragment_size": 300, "number_of_fragments": 3},
                "Director": {}
            },
            # Allows highlighting on fields that were not part of the
            # query, such as Plot in this example.
            "require_field_match": False
        }
    }
    res = es.search(index="prueba-index", body=body)
    print("Got %d Hits:" % res['hits']['total'])
    return [hit['highlight'] for hit in res['hits']['hits']]
def realizar_busqueda_4(buscado):
    """Multi-field fuzzy search over the movie index.

    Builds a ``bool``/``should`` query matching the search term against
    every indexed field and returns the highlight block of each hit.
    """
    # Per-field (query, fuzziness) settings, mirroring the original
    # hand-written clauses: Title searched with a trailing ".*" pattern,
    # Plot with fixed fuzziness 2, everything else with AUTO.
    field_settings = [
        ("Title", buscado + ".*", "AUTO"),
        ("Plot", buscado, 2),
        ("Genres", buscado, "AUTO"),
        ("Director", buscado, "AUTO"),
        ("Writer", buscado, "AUTO"),
        ("Cast", buscado, "AUTO"),
        ("Country", buscado, "AUTO"),
        ("Language", buscado, "AUTO"),
        ("Rating", buscado, "AUTO"),
    ]
    should_clauses = [
        {"match": {field: {
            "query": query,
            "fuzziness": fuzziness,
            "prefix_length": 1,
            "operator": "and"
        }}}
        for field, query, fuzziness in field_settings
    ]
    body = {
        "query": {
            "bool": {"should": should_clauses}
        },
        "highlight": {
            "fields": {
                "Title": {},
                "Plot": {},
                "Director": {}
            },
            # Allows highlighting on fields that were not part of the
            # query, such as Plot in this example.
            "require_field_match": False
        }
    }
    # BUG FIX: the original built the query as ``bodyQuery2`` but then
    # called es.search with the undefined name ``bodyQuery``, so this
    # function raised NameError instead of searching.
    res = es.search(index="prueba-index", body=body)
    print("Got %d Hits:" % res['hits']['total'])
    return [hit['highlight'] for hit in res['hits']['hits']]
if __name__ == '__main__':
    # Development server only; run behind a proper WSGI server in production.
    app.run(debug=True)
| cristhro/Machine-Learning | ejercicio 5/flaskApp/flaskApp.py | Python | gpl-3.0 | 6,023 |
import itertools
import os
class TreeHasher():
    """Applies a BlockHasher to every regular file under a directory tree.

    Generators produced and consumed here use tuples of the form
    (relative-filepath, chunk_nr, hexdigest).
    """

    def __init__(self, block_hasher):
        """
        :type block_hasher: BlockHasher
        """
        self.block_hasher = block_hasher

    def generate(self, start_path):
        """Yield (relpath, chunk_nr, hexdigest) for every chunk of every file.

        Only the contents of actual regular files are hashed; metadata such
        as permissions and mtimes is ignored, as are empty directories,
        symlinks and special files.
        """
        def raise_walk_error(err):
            # surface directory-walk failures instead of skipping them
            raise err

        for dirpath, _dirnames, filenames in os.walk(
                start_path, onerror=raise_walk_error):
            for entry in filenames:
                path = os.path.join(dirpath, entry)
                if os.path.islink(path) or not os.path.isfile(path):
                    continue
                rel = os.path.relpath(path, start_path)
                for chunk_nr, digest in self.block_hasher.generate(path):
                    yield (rel, chunk_nr, digest)

    def compare(self, start_path, generator):
        """Read (relpath, chunk_nr, hexdigest) tuples and verify the blocks.

        Yields mismatches as (relpath, chunk_nr, compare_hexdigest, actual_hexdigest)
        and errors as (relpath, chunk_nr, compare_hexdigest, "message").
        """
        for file_name, items in itertools.groupby(generator,
                                                  lambda item: item[0]):
            # strip the file name, keeping only (chunk_nr, hexdigest) pairs
            block_pairs = ((chunk_nr, digest)
                           for _, chunk_nr, digest in items)
            full_path = os.path.join(start_path, file_name)
            for (chunk_nr, expected, actual) in self.block_hasher.compare(
                    full_path, block_pairs):
                yield (file_name, chunk_nr, expected, actual)
| psy0rz/zfs_autobackup | zfs_autobackup/TreeHasher.py | Python | gpl-3.0 | 2,011 |
import io
import math
import random
from contextlib import redirect_stdout
from unittest import TestCase
from hamcrest import *
from array_util import get_random_unique_array
from chapter16.problem16_1 import greedy_make_change, make_change, print_change
from datastructures.array import Array
from util import between
def get_min_change_size_bruteforce(n, d):
    """Return the minimum number of coins from denominations *d* summing to *n*.

    Plain exponential recursion, used as a reference oracle in the tests;
    returns ``math.inf`` when no combination of denominations adds up to *n*.
    """
    if n == 0:
        return 0
    candidates = (1 + get_min_change_size_bruteforce(n - denom, d)
                  for denom in d if denom <= n)
    return min(candidates, default=math.inf)
class TestProblem16_1(TestCase):
    """Randomized tests for the two make-change algorithms of problem 16-1."""

    def test_greedy_make_change(self):
        # n is the amount to make change for; d are the fixed denominations
        # the greedy algorithm is defined over.
        n = random.randint(1, 20)
        d = Array([1, 2, 5, 10, 20, 50])

        actual_change = greedy_make_change(n)

        # Compare against the exponential brute-force oracle.
        expected_change_size = get_min_change_size_bruteforce(n, d)
        # actual_change[i] counts coins of denomination d[i] (1-based Array).
        actual_change_sum = sum(actual_change[i] * d[i] for i in between(1, d.length))
        assert_that(sum(actual_change), is_(equal_to(expected_change_size)))
        assert_that(actual_change_sum, is_(equal_to(n)))

    def test_make_change(self):
        n = random.randint(1, 20)
        k = random.randint(1, 5)
        # Random unique denominations; denomination 1 is forced so that
        # change always exists for any n.
        d, _ = get_random_unique_array(max_size=k, min_value=2, max_value=20)
        d[1] = 1
        captured_output = io.StringIO()

        actual_change, actual_denominators = make_change(n, d)
        with redirect_stdout(captured_output):
            print_change(n, actual_denominators)

        expected_change_size = get_min_change_size_bruteforce(n, d)
        assert_that(actual_change[n], is_(equal_to(expected_change_size)))
        # print_change emits one denomination per line; they must sum to n
        # and there must be exactly the optimal number of them.
        # NOTE(review): the comprehension variable 'd' shadows the
        # denominations array above; intentional but easy to misread.
        actual_change_denoms = [int(d) for d in captured_output.getvalue().splitlines()]
        assert_that(sum(actual_change_denoms), is_(equal_to(n)))
        assert_that(len(actual_change_denoms), is_(equal_to(expected_change_size)))
| wojtask/CormenPy | test/test_chapter16/test_problem16_1.py | Python | gpl-3.0 | 1,843 |
# -*- coding: utf-8 -*-
# Copyright (c) 2004 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the TR Previewer main window.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import QDir, QTimer, QFileInfo, pyqtSignal, QEvent, QSize, \
QTranslator, QObject, Qt, QCoreApplication
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QSizePolicy, QSpacerItem, QWidget, QHBoxLayout, \
QWhatsThis, QMdiArea, qApp, QApplication, QComboBox, QVBoxLayout, \
QAction, QLabel
from PyQt5 import uic
from E5Gui import E5MessageBox, E5FileDialog
from E5Gui.E5MainWindow import E5MainWindow
import UI.PixmapCache
import UI.Config
import Preferences
# Combo-box entry displayed when no translation file is active.
noTranslationName = QCoreApplication.translate(
    "TRPreviewer", "<No translation>")
class TRPreviewer(E5MainWindow):
    """
    Class implementing the UI Previewer main window.
    """
    def __init__(self, filenames=[], parent=None, name=None):
        """
        Constructor

        @param filenames filenames of form and/or translation files to load
        @param parent parent widget of this window (QWidget)
        @param name name of this window (string)
        """
        # NOTE(review): mutable default argument; harmless here because the
        # list is only copied (filenames[:]) and never mutated.
        self.mainWidget = None
        self.currentFile = QDir.currentPath()

        super(TRPreviewer, self).__init__(parent)
        if not name:
            self.setObjectName("TRPreviewer")
        else:
            self.setObjectName(name)

        self.setStyle(Preferences.getUI("Style"),
                      Preferences.getUI("StyleSheet"))

        self.resize(QSize(800, 600).expandedTo(self.minimumSizeHint()))
        self.statusBar()

        self.setWindowIcon(UI.PixmapCache.getIcon("eric.png"))
        self.setWindowTitle(self.tr("Translations Previewer"))

        self.cw = QWidget(self)
        self.cw.setObjectName("qt_central_widget")

        self.TRPreviewerLayout = QVBoxLayout(self.cw)
        self.TRPreviewerLayout.setContentsMargins(6, 6, 6, 6)
        self.TRPreviewerLayout.setSpacing(6)
        self.TRPreviewerLayout.setObjectName("TRPreviewerLayout")

        # row with the language selector combo box
        self.languageLayout = QHBoxLayout()
        self.languageLayout.setContentsMargins(0, 0, 0, 0)
        self.languageLayout.setSpacing(6)
        self.languageLayout.setObjectName("languageLayout")

        self.languageLabel = QLabel(
            self.tr("Select language file"), self.cw)
        self.languageLabel.setObjectName("languageLabel")
        self.languageLayout.addWidget(self.languageLabel)

        self.languageCombo = QComboBox(self.cw)
        self.languageCombo.setObjectName("languageCombo")
        self.languageCombo.setEditable(False)
        self.languageCombo.setToolTip(self.tr("Select language file"))
        self.languageCombo.setSizePolicy(
            QSizePolicy.Expanding, QSizePolicy.Preferred)
        self.languageLayout.addWidget(self.languageCombo)

        languageSpacer = QSpacerItem(
            40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.languageLayout.addItem(languageSpacer)
        self.TRPreviewerLayout.addLayout(self.languageLayout)

        # MDI area showing the previewed widgets
        self.preview = WidgetArea(self.cw)
        self.preview.setObjectName("preview")
        self.TRPreviewerLayout.addWidget(self.preview)
        self.preview.lastWidgetClosed.connect(self.__updateActions)

        self.setCentralWidget(self.cw)

        self.languageCombo.activated[str].connect(self.setTranslation)

        self.translations = TranslationsDict(self.languageCombo, self)
        self.translations.translationChanged.connect(
            self.preview.rebuildWidgets)

        self.__initActions()
        self.__initMenus()
        self.__initToolbars()

        self.__updateActions()

        # fire up the single application server
        from .TRSingleApplication import TRSingleApplicationServer
        self.SAServer = TRSingleApplicationServer(self)
        self.SAServer.loadForm.connect(self.preview.loadWidget)
        self.SAServer.loadTranslation.connect(self.translations.add)

        # defer loading of a UI file until we are shown
        self.filesToLoad = filenames[:]

    def show(self):
        """
        Public slot to show this dialog.

        This overloaded slot loads a UI file to be previewed after
        the main window has been shown. This way, previewing a dialog
        doesn't interfere with showing the main window.
        """
        super(TRPreviewer, self).show()
        if self.filesToLoad:
            filenames, self.filesToLoad = (self.filesToLoad[:], [])
            first = True
            for fn in filenames:
                # dispatch on the file extension: .ui files are previewed,
                # .qm files are loaded as translations
                fi = QFileInfo(fn)
                if fi.suffix().lower() == 'ui':
                    self.preview.loadWidget(fn)
                elif fi.suffix().lower() == 'qm':
                    self.translations.add(fn, first)
                    first = False

            self.__updateActions()

    def closeEvent(self, event):
        """
        Protected event handler for the close event.

        @param event close event (QCloseEvent)
        """
        if self.SAServer is not None:
            self.SAServer.shutdown()
            self.SAServer = None
        event.accept()

    def __initActions(self):
        """
        Private method to define the user interface actions.
        """
        self.openUIAct = QAction(
            UI.PixmapCache.getIcon("openUI.png"),
            self.tr('&Open UI Files...'), self)
        self.openUIAct.setStatusTip(self.tr('Open UI files for display'))
        self.openUIAct.setWhatsThis(self.tr(
            """<b>Open UI Files</b>"""
            """<p>This opens some UI files for display.</p>"""
        ))
        self.openUIAct.triggered.connect(self.__openWidget)

        self.openQMAct = QAction(
            UI.PixmapCache.getIcon("openQM.png"),
            self.tr('Open &Translation Files...'), self)
        self.openQMAct.setStatusTip(self.tr(
            'Open Translation files for display'))
        self.openQMAct.setWhatsThis(self.tr(
            """<b>Open Translation Files</b>"""
            """<p>This opens some translation files for display.</p>"""
        ))
        self.openQMAct.triggered.connect(self.__openTranslation)

        self.reloadAct = QAction(
            UI.PixmapCache.getIcon("reload.png"),
            self.tr('&Reload Translations'), self)
        self.reloadAct.setStatusTip(self.tr(
            'Reload the loaded translations'))
        self.reloadAct.setWhatsThis(self.tr(
            """<b>Reload Translations</b>"""
            """<p>This reloads the translations for the loaded"""
            """ languages.</p>"""
        ))
        self.reloadAct.triggered.connect(self.translations.reload)

        self.exitAct = QAction(
            UI.PixmapCache.getIcon("exit.png"), self.tr('&Quit'), self)
        self.exitAct.setShortcut(QKeySequence(
            self.tr("Ctrl+Q", "File|Quit")))
        self.exitAct.setStatusTip(self.tr('Quit the application'))
        self.exitAct.setWhatsThis(self.tr(
            """<b>Quit</b>"""
            """<p>Quit the application.</p>"""
        ))
        self.exitAct.triggered.connect(qApp.closeAllWindows)

        self.whatsThisAct = QAction(
            UI.PixmapCache.getIcon("whatsThis.png"),
            self.tr('&What\'s This?'), self)
        self.whatsThisAct.setShortcut(QKeySequence(self.tr("Shift+F1")))
        self.whatsThisAct.setStatusTip(self.tr('Context sensitive help'))
        self.whatsThisAct.setWhatsThis(self.tr(
            """<b>Display context sensitive help</b>"""
            """<p>In What's This? mode, the mouse cursor shows an arrow"""
            """ with a question mark, and you can click on the interface"""
            """ elements to get a short description of what they do and"""
            """ how to use them. In dialogs, this feature can be accessed"""
            """ using the context help button in the titlebar.</p>"""
        ))
        self.whatsThisAct.triggered.connect(self.__whatsThis)

        self.aboutAct = QAction(self.tr('&About'), self)
        self.aboutAct.setStatusTip(self.tr(
            'Display information about this software'))
        self.aboutAct.setWhatsThis(self.tr(
            """<b>About</b>"""
            """<p>Display some information about this software.</p>"""
        ))
        self.aboutAct.triggered.connect(self.__about)

        self.aboutQtAct = QAction(self.tr('About &Qt'), self)
        self.aboutQtAct.setStatusTip(
            self.tr('Display information about the Qt toolkit'))
        self.aboutQtAct.setWhatsThis(self.tr(
            """<b>About Qt</b>"""
            """<p>Display some information about the Qt toolkit.</p>"""
        ))
        self.aboutQtAct.triggered.connect(self.__aboutQt)

        self.tileAct = QAction(self.tr('&Tile'), self)
        self.tileAct.setStatusTip(self.tr('Tile the windows'))
        self.tileAct.setWhatsThis(self.tr(
            """<b>Tile the windows</b>"""
            """<p>Rearrange and resize the windows so that they are"""
            """ tiled.</p>"""
        ))
        self.tileAct.triggered.connect(self.preview.tileSubWindows)

        self.cascadeAct = QAction(self.tr('&Cascade'), self)
        self.cascadeAct.setStatusTip(self.tr('Cascade the windows'))
        self.cascadeAct.setWhatsThis(self.tr(
            """<b>Cascade the windows</b>"""
            """<p>Rearrange and resize the windows so that they are"""
            """ cascaded.</p>"""
        ))
        self.cascadeAct.triggered.connect(self.preview.cascadeSubWindows)

        self.closeAct = QAction(
            UI.PixmapCache.getIcon("close.png"), self.tr('&Close'), self)
        self.closeAct.setShortcut(QKeySequence(self.tr(
            "Ctrl+W", "File|Close")))
        self.closeAct.setStatusTip(self.tr('Close the current window'))
        self.closeAct.setWhatsThis(self.tr(
            """<b>Close Window</b>"""
            """<p>Close the current window.</p>"""
        ))
        self.closeAct.triggered.connect(self.preview.closeWidget)

        self.closeAllAct = QAction(self.tr('Clos&e All'), self)
        self.closeAllAct.setStatusTip(self.tr('Close all windows'))
        self.closeAllAct.setWhatsThis(self.tr(
            """<b>Close All Windows</b>"""
            """<p>Close all windows.</p>"""
        ))
        self.closeAllAct.triggered.connect(self.preview.closeAllWidgets)

    def __initMenus(self):
        """
        Private method to create the menus.
        """
        mb = self.menuBar()

        menu = mb.addMenu(self.tr('&File'))
        menu.setTearOffEnabled(True)
        menu.addAction(self.openUIAct)
        menu.addAction(self.openQMAct)
        menu.addAction(self.reloadAct)
        menu.addSeparator()
        menu.addAction(self.closeAct)
        menu.addAction(self.closeAllAct)
        menu.addSeparator()
        menu.addAction(self.exitAct)

        # window menu is rebuilt on demand in __showWindowMenu
        self.windowMenu = mb.addMenu(self.tr('&Window'))
        self.windowMenu.setTearOffEnabled(True)
        self.windowMenu.aboutToShow.connect(self.__showWindowMenu)
        self.windowMenu.triggered.connect(self.preview.toggleSelectedWidget)

        mb.addSeparator()

        menu = mb.addMenu(self.tr('&Help'))
        menu.setTearOffEnabled(True)
        menu.addAction(self.aboutAct)
        menu.addAction(self.aboutQtAct)
        menu.addSeparator()
        menu.addAction(self.whatsThisAct)

    def __initToolbars(self):
        """
        Private method to create the toolbars.
        """
        filetb = self.addToolBar(self.tr("File"))
        filetb.setIconSize(UI.Config.ToolBarIconSize)
        filetb.addAction(self.openUIAct)
        filetb.addAction(self.openQMAct)
        filetb.addAction(self.reloadAct)
        filetb.addSeparator()
        filetb.addAction(self.closeAct)
        filetb.addSeparator()
        filetb.addAction(self.exitAct)

        helptb = self.addToolBar(self.tr("Help"))
        helptb.setIconSize(UI.Config.ToolBarIconSize)
        helptb.addAction(self.whatsThisAct)

    def __whatsThis(self):
        """
        Private slot called in to enter Whats This mode.
        """
        QWhatsThis.enterWhatsThisMode()

    def __updateActions(self):
        """
        Private slot to update the actions state.
        """
        # window-related actions follow whether any widget is previewed
        if self.preview.hasWidgets():
            self.closeAct.setEnabled(True)
            self.closeAllAct.setEnabled(True)
            self.tileAct.setEnabled(True)
            self.cascadeAct.setEnabled(True)
        else:
            self.closeAct.setEnabled(False)
            self.closeAllAct.setEnabled(False)
            self.tileAct.setEnabled(False)
            self.cascadeAct.setEnabled(False)

        if self.translations.hasTranslations():
            self.reloadAct.setEnabled(True)
        else:
            self.reloadAct.setEnabled(False)

    def __about(self):
        """
        Private slot to show the about information.
        """
        E5MessageBox.about(
            self,
            self.tr("TR Previewer"),
            self.tr(
                """<h3> About TR Previewer </h3>"""
                """<p>The TR Previewer loads and displays Qt User-Interface"""
                """ files and translation files and shows dialogs for a"""
                """ selected language.</p>"""
            )
        )

    def __aboutQt(self):
        """
        Private slot to show info about Qt.
        """
        E5MessageBox.aboutQt(self, self.tr("TR Previewer"))

    def __openWidget(self):
        """
        Private slot to handle the Open Dialog action.
        """
        fileNameList = E5FileDialog.getOpenFileNames(
            None,
            self.tr("Select UI files"),
            "",
            self.tr("Qt User-Interface Files (*.ui)"))

        for fileName in fileNameList:
            self.preview.loadWidget(fileName)

        self.__updateActions()

    def __openTranslation(self):
        """
        Private slot to handle the Open Translation action.
        """
        fileNameList = E5FileDialog.getOpenFileNames(
            None,
            self.tr("Select translation files"),
            "",
            self.tr("Qt Translation Files (*.qm)"))

        # only the first selected translation becomes the active one
        first = True
        for fileName in fileNameList:
            self.translations.add(fileName, first)
            first = False

        self.__updateActions()

    def setTranslation(self, name):
        """
        Public slot to activate a translation.

        @param name name (language) of the translation (string)
        """
        self.translations.set(name)

    def __showWindowMenu(self):
        """
        Private slot to handle the aboutToShow signal of the window menu.
        """
        self.windowMenu.clear()
        self.windowMenu.addAction(self.tileAct)
        self.windowMenu.addAction(self.cascadeAct)
        self.windowMenu.addSeparator()

        self.preview.showWindowMenu(self.windowMenu)

    def reloadTranslations(self):
        """
        Public slot to reload all translations.
        """
        self.translations.reload()
class Translation(object):
    """
    Simple value object describing one loaded translation: the .qm file
    name, the display name shown in the selector and the QTranslator.
    """
    def __init__(self):
        """
        Constructor
        """
        self.fileName = self.name = self.translator = None
class TranslationsDict(QObject):
"""
Class to store all loaded translations.
@signal translationChanged() emit after a translator was set
"""
translationChanged = pyqtSignal()
    def __init__(self, selector, parent):
        """
        Constructor

        @param selector reference to the QComboBox used to show the
            available languages (QComboBox)
        @param parent parent widget (QWidget)
        """
        super(TranslationsDict, self).__init__(parent)

        self.selector = selector
        self.currentTranslator = None
        # the "<No translation>" entry is always present and always first
        self.selector.addItem(noTranslationName)
        self.translations = []  # list of Translation objects
    def add(self, fileName, setTranslation=True):
        """
        Public method to add a translation to the list.

        If the translation file (*.qm) has not been loaded yet, it will
        be loaded automatically.

        @param fileName name of the translation file to be added (string)
        @param setTranslation flag indicating, if this should be set as
            the active translation (boolean)
        """
        if not self.__haveFileName(fileName):
            ntr = Translation()
            ntr.fileName = fileName
            # derive a unique display name from the file's base name
            ntr.name = self.__uniqueName(fileName)
            if ntr.name is None:
                E5MessageBox.warning(
                    self.parent(),
                    self.tr("Set Translator"),
                    self.tr(
                        """<p>The translation filename <b>{0}</b>"""
                        """ is invalid.</p>""").format(fileName))
                return

            ntr.translator = self.loadTransFile(fileName)
            if ntr.translator is None:
                # loadTransFile already reported the failure
                return

            self.selector.addItem(ntr.name)
            self.translations.append(ntr)

        if setTranslation:
            tr = self.__findFileName(fileName)
            self.set(tr.name)
    def set(self, name):
        """
        Public slot to set a translator by name.

        @param name name (language) of the translator to set (string)
        """
        nTranslator = None

        if name != noTranslationName:
            trans = self.__findName(name)
            if trans is None:
                E5MessageBox.warning(
                    self.parent(),
                    self.tr("Set Translator"),
                    self.tr(
                        """<p>The translator <b>{0}</b> is not known.</p>""")
                    .format(name))
                return

            nTranslator = trans.translator

        if nTranslator == self.currentTranslator:
            # nothing to do; the requested translator is already active
            return

        # swap the application-wide translator
        if self.currentTranslator is not None:
            QApplication.removeTranslator(self.currentTranslator)
        if nTranslator is not None:
            QApplication.installTranslator(nTranslator)
        self.currentTranslator = nTranslator

        # sync the combo box without re-triggering setTranslation
        self.selector.blockSignals(True)
        self.selector.setCurrentIndex(self.selector.findText(name))
        self.selector.blockSignals(False)

        self.translationChanged.emit()
    def reload(self):
        """
        Public method to reload all translators.
        """
        # remember the active language so it can be restored afterwards
        cname = self.selector.currentText()
        if self.currentTranslator is not None:
            QApplication.removeTranslator(self.currentTranslator)
            self.currentTranslator = None

        # drop all translations, keeping only their file names
        fileNames = []
        for trans in self.translations:
            trans.translator = None
            fileNames.append(trans.fileName)
        self.translations = []
        self.selector.clear()
        self.selector.addItem(noTranslationName)

        # reload from disk and restore the previously active language
        for fileName in fileNames:
            self.add(fileName, False)

        if self.__haveName(cname):
            self.set(cname)
        else:
            self.set(noTranslationName)
def __findFileName(self, transFileName):
"""
Private method to find a translation by file name.
@param transFileName file name of the translation file (string)
@return reference to a translation object or None
"""
for trans in self.translations:
if trans.fileName == transFileName:
return trans
return None
def __findName(self, name):
"""
Private method to find a translation by name.
@param name name (language) of the translation (string)
@return reference to a translation object or None
"""
for trans in self.translations:
if trans.name == name:
return trans
return None
def __haveFileName(self, transFileName):
"""
Private method to check for the presence of a translation.
@param transFileName file name of the translation file (string)
@return flag indicating the presence of the translation (boolean)
"""
return self.__findFileName(transFileName) is not None
def __haveName(self, name):
"""
Private method to check for the presence of a named translation.
@param name name (language) of the translation (string)
@return flag indicating the presence of the translation (boolean)
"""
return self.__findName(name) is not None
def __uniqueName(self, transFileName):
"""
Private method to generate a unique name.
@param transFileName file name of the translation file (string)
@return unique name (string or None)
"""
name = os.path.basename(transFileName)
if not name:
return None
uname = name
cnt = 1
while self.__haveName(uname):
cnt += 1
uname = "{0} <{1}>".format(name, cnt)
return uname
def __del(self, name):
"""
Private method to delete a translator from the list of available
translators.
@param name name of the translator to delete (string)
"""
if name == noTranslationName:
return
trans = self.__findName(name)
if trans is None:
return
if self.selector().currentText() == name:
self.set(noTranslationName)
self.translations.remove(trans)
del trans
def loadTransFile(self, transFileName):
"""
Public slot to load a translation file.
@param transFileName file name of the translation file (string)
@return reference to the new translator object (QTranslator)
"""
tr = QTranslator()
if tr.load(transFileName):
return tr
E5MessageBox.warning(
self.parent(),
self.tr("Load Translator"),
self.tr("""<p>The translation file <b>{0}</b> could"""
""" not be loaded.</p>""").format(transFileName))
return None
def hasTranslations(self):
"""
Public method to check for loaded translations.
@return flag signaling if any translation was loaded (boolean)
"""
return len(self.translations) > 0
class WidgetView(QWidget):
    """
    Class to show a dynamically loaded widget (or dialog).
    """
    def __init__(self, uiFileName, parent=None, name=None):
        """
        Constructor
        
        @param uiFileName name of the UI file to load (string)
        @param parent parent widget (QWidget)
        @param name name of this widget (string)
        """
        super(WidgetView, self).__init__(parent)
        if name:
            self.setObjectName(name)
            self.setWindowTitle(name)
        self.__widget = None
        self.__uiFileName = uiFileName
        self.__layout = QHBoxLayout(self)
        self.__valid = False
        # Single-shot timer used to defer rebuilding the widget until
        # control returns to the event loop (see __rebuildWidget).
        self.__timer = QTimer(self)
        self.__timer.setSingleShot(True)
        self.__timer.timeout.connect(self.buildWidget)
    
    def isValid(self):
        """
        Public method to return the validity of this widget view.
        
        @return flag indicating the validity (boolean)
        """
        return self.__valid
    
    def uiFileName(self):
        """
        Public method to retrieve the name of the UI file.
        
        @return filename of the loaded UI file (string)
        """
        return self.__uiFileName
    
    def buildWidget(self):
        """
        Public slot to load a UI file.
        
        Discards a previously built widget, loads the UI file and embeds
        the resulting widget into this view's layout. On failure a warning
        is shown and the view is marked invalid.
        """
        if self.__widget:
            # Throw away the old widget before rebuilding.
            self.__widget.close()
            self.__layout.removeWidget(self.__widget)
            del self.__widget
            self.__widget = None
        try:
            self.__widget = uic.loadUi(self.__uiFileName)
        except Exception:
            # Bug fix: the previous bare 'except:' also swallowed
            # SystemExit and KeyboardInterrupt. Failure is reported to
            # the user via the warning below.
            pass
        if not self.__widget:
            E5MessageBox.warning(
                self,
                self.tr("Load UI File"),
                self.tr(
                    """<p>The file <b>{0}</b> could not be loaded.</p>""")
                .format(self.__uiFileName))
            self.__valid = False
            return
        self.__widget.setParent(self)
        self.__layout.addWidget(self.__widget)
        self.__widget.show()
        self.__valid = True
        self.adjustSize()
        self.__timer.stop()
    
    def __rebuildWidget(self):
        """
        Private method to schedule a rebuild of the widget.
        """
        self.__timer.start(0)
class WidgetArea(QMdiArea):
    """
    Specialized MDI area to show the loaded widgets.
    
    @signal lastWidgetClosed() emitted after last widget was closed
    @signal rebuildWidgets() emitted to trigger a rebuild of all widget views
    """
    lastWidgetClosed = pyqtSignal()
    rebuildWidgets = pyqtSignal()
    
    def __init__(self, parent=None):
        """
        Constructor
        
        @param parent parent widget (QWidget)
        """
        super(WidgetArea, self).__init__(parent)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        # List of QMdiSubWindow objects wrapping the loaded widget views.
        self.widgets = []
    
    def loadWidget(self, uiFileName):
        """
        Public slot to load a UI file.
        
        @param uiFileName name of the UI file to load (string)
        """
        wview = self.__findWidget(uiFileName)
        if wview is None:
            name = os.path.basename(uiFileName)
            if not name:
                E5MessageBox.warning(
                    self,
                    self.tr("Load UI File"),
                    self.tr(
                        """<p>The file <b>{0}</b> could not be loaded.</p>""")
                    .format(uiFileName))
                return
            # Make the object name unique among the loaded widget views.
            uname = name
            cnt = 1
            while self.findChild(WidgetView, uname) is not None:
                cnt += 1
                uname = "{0} <{1}>".format(name, cnt)
            name = uname
            wview = WidgetView(uiFileName, self, name)
            wview.buildWidget()
            if not wview.isValid():
                del wview
                return
            self.rebuildWidgets.connect(wview.buildWidget)  # __IGNORE_WARNING__
            wview.installEventFilter(self)  # __IGNORE_WARNING__
            win = self.addSubWindow(wview)  # __IGNORE_WARNING__
            self.widgets.append(win)
        wview.showNormal()  # __IGNORE_WARNING__
    
    def eventFilter(self, obj, ev):
        """
        Public method called to filter an event.
        
        @param obj object, that generated the event (QObject)
        @param ev the event, that was generated by object (QEvent)
        @return flag indicating if event was filtered out
        """
        if obj in self.widgets and ev.type() == QEvent.Close:
            try:
                self.widgets.remove(obj)
                if len(self.widgets) == 0:
                    self.lastWidgetClosed.emit()
            except ValueError:
                # Already removed by a concurrent close; nothing to do.
                pass
        return QMdiArea.eventFilter(self, obj, ev)
    
    def __findWidget(self, uiFileName):
        """
        Private method to find a specific widget view.
        
        @param uiFileName filename of the loaded UI file (string)
        @return reference to the widget (WidgetView) or None
        """
        wviewList = self.findChildren(WidgetView)
        if wviewList is None:
            return None
        for wview in wviewList:
            if wview.uiFileName() == uiFileName:
                return wview
        return None
    
    def closeWidget(self):
        """
        Public slot to close the active window.
        """
        aw = self.activeSubWindow()
        if aw is not None:
            aw.close()
    
    def closeAllWidgets(self):
        """
        Public slot to close all windows.
        """
        # Iterate over a copy: closing mutates self.widgets via eventFilter.
        for w in self.widgets[:]:
            w.close()
    
    def showWindowMenu(self, windowMenu):
        """
        Public method to set up the widgets part of the Window menu.
        
        @param windowMenu reference to the window menu
        """
        # Idiom fix: use enumerate instead of a manually incremented counter.
        for idx, wid in enumerate(self.widgets):
            act = windowMenu.addAction(wid.windowTitle())
            act.setData(idx)
            act.setCheckable(True)
            act.setChecked(not wid.isHidden())
    
    def toggleSelectedWidget(self, act):
        """
        Public method to handle the toggle of a window.
        
        @param act reference to the action that triggered (QAction)
        """
        idx = act.data()
        if idx is not None:
            self.__toggleWidget(self.widgets[idx])
    
    def __toggleWidget(self, w):
        """
        Private method to toggle a workspace window.
        
        @param w window to be toggled
        """
        if w.isHidden():
            w.show()
        else:
            w.hide()
    
    def hasWidgets(self):
        """
        Public method to check for loaded widgets.
        
        @return flag signaling if any widget was loaded (boolean)
        """
        return len(self.widgets) > 0
| davy39/eric | Tools/TRPreviewer.py | Python | gpl-3.0 | 29,971 |
# -*- coding: utf-8 -*-
"""
This module implements users.
"""
import os
from persistent import Persistent
import src.core.db as db
import src.core.exc as exc
import src.core.utils as utils
import src.core.security as security
import random
import time
class User(object):
    """
    An application user session.
    
    Holds the user's identity (id, name, password hash), its persistent
    per-user database (via the module-level ``db`` state) and the global
    record stored in the application database (``UserDataAppDB``).
    Typical lifecycle: create_user() or login(), work, then close().
    """
    def __init__(self):
        self.__set_to_none()
    def __set_to_none(self):
        # Reset all session state; called on construction and on close().
        self.db = None
        self.app_db = None
        self.app_db_data = None
        self.id = None
        self.password_hash = None
        self.password_verified = None
        self.user_data = None # pdict
        self.name = None
        self.plugins = None # plugins are bound to Users
        # TODO: create a list of allowed plugins.
    def login(self, _id, password):
        """
        Shorthand function for load, verify_password and load_user_db.
        Automatically retrieves the app_db_data of the given id.
        
        Returns the result of the password verification (True/False).
        Raises KeyError if _id is not present in the application database.
        """
        if self.app_db is None:
            self.connect_to_appdb()
        self.load(self.app_db.root['Users'][_id])
        if self.verify_password(password):
            self.load_user_db(password)
        return self.password_verified
    def close(self, password, save_plugins=True, set_to_none=True):
        """
        Argument save_plugins will be directly passed to cDB.
        Argument password will be used for authentication and key encryption.
        Returns True in case user database closing had been successful, False otherwise.
        Regardless of database closing, if set_to_none is True,
        internal variables will be erased.
        
        Raises USR_ClosingUnverifiedUser if the password does not verify.
        """
        closed = False
        if db.user_db is not None:
            if self.verify_password(password):
                # The user database is encrypted with the login password.
                db.user_db.close(save_plugins=save_plugins, encrypt=True, key=password)
                closed = True
            else:
                raise exc.USR_ClosingUnverifiedUser()
            db.user_db = None
        if set_to_none:
            self.__set_to_none()
        return closed
    def delete_user(self, password):
        """
        Removes user from app_db and deletes its database files.
        User has to be logged in first.
        User database will be closed in the process.
        This function does not explicitly handle user closing (__set_to_none).
        
        Returns the list of deleted file paths (see delete_files).
        """
        if self.app_db is None:
            self.connect_to_appdb()
        if self.id is None:
            raise exc.USR_DeletingUninitilizedUser()
        if not self.verify_password(password):
            raise exc.USR_DeletingUnverifiedUser()
        # NOTE(review): close_db() assumes self.db is set (i.e. the user
        # database was opened); verify against callers.
        self.close_db()
        del self.app_db.root['Users'][self.id]
        return self.delete_files()
    def save_user(self):
        """
        Saves user global information (specifically: in
        'User-Info' and in 'Users'). Does not save data
        in the user's database (handled by User.close).
        """
        if db.user_db is None:
            raise exc.USR_SavingUserOnUnitilizedDb()
        db.user_db.root['User-Info'] = self.user_data
        self.save_dbdata(save_in_db=True)
    def create_user(self, name, password):
        """
        Creates the user with the given password and name and a new randomly generated id.
        Also loads the database db.user_db.
        User doesn't get saved by this function either in the application database
        or in the user database. It has to be saved explicitly at a later
        moment. The user database gets initialized anyways even though it doesn't get saved.
        """
        if type(password) != str:
            raise exc.USR_BadPasswordType(password)
        self.password_hash = security.pwd_context.encrypt(password)
        # Sanity check: the freshly computed hash must verify round-trip.
        if not security.pwd_context.verify(password, self.password_hash):
            raise exc.USR_EncryptionFailureOnCreatingUser()
        self.password_verified = True
        self.id = generate_new_id()
        self.name = name
        # New user: nothing to decrypt yet.
        self.init_db(decrypt=False)
        self.user_data = utils.pdict()
        self.user_data.update({
            "CreationDay": time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
        })
    def load(self, app_db_data):
        """
        This function loads the given folder (UserDataAppDB) into this User instance.
        Essentially loads the user id and password hash that needs to be verified.
        The password doesn't get verified. Must happen at a later moment.
        """
        if app_db_data.__class__ != UserDataAppDB:
            raise exc.USR_BadFolderClass(app_db_data)
        self.retrieve_dbdata(app_db_data)
    def verify_password(self, password):
        """
        This function verifies the password argument with the loaded password hash.
        Returns True in case the password has been correctly verified, False otherwise.
        If no password hash has been loaded, None will be returned.
        """
        self.password_verified = None
        if self.password_hash is not None:
            self.password_verified = security.pwd_context.verify(password, self.password_hash)
        return self.password_verified
    def connect_to_appdb(self):
        """
        Connects this user to the application database
        and stores it in User.app_db only if it was
        already opened.
        """
        if db.app_db is None:
            raise exc.USR_AppDbClosed()
        self.app_db = db.app_db
    def load_user_db(self, password, decrypt=True):
        # Open this user's database; requires a prior successful
        # verify_password() call (password_verified is truthy).
        if not self.password_verified:
            raise exc.USR_LoadingUnverifiedUser()
        self.init_db(decrypt=decrypt, password=password)
        self.user_data = db.user_db.root['User-Info']
    def retrieve_dbdata(self, app_db_data=None):
        """
        Retrieves data from self.app_db_data.
        For lazy people, app_db_data can be directly passed
        to this function and self.app_db_data will automatically
        be set to it.
        """
        if app_db_data is not None:
            self.app_db_data = app_db_data
        if self.app_db_data is None:
            raise exc.USR_AppDbDataNone()
        self.id = self.app_db_data.id
        self.name = self.app_db_data.name
        self.password_hash = self.app_db_data.password_hash
    def save_dbdata(self, save_in_db=False):
        """
        Saves global user data to self.app_db_data.
        For lazy people, if save_in_appdb = True, then
        self.app_db_data will be saved in the application
        database.
        """
        if self.app_db is None:
            self.connect_to_appdb()
        # A fresh record is built each time from the current session state.
        self.app_db_data = UserDataAppDB()
        self.app_db_data.id = self.id
        self.app_db_data.name = self.name
        self.app_db_data.password_hash = self.password_hash
        if save_in_db:
            self.app_db.root['Users'][self.id] = self.app_db_data
    def delete_files(self):
        """
        Deletes database files related to this user.
        If a file doesn't exist the os.remove() call won't be executed.
        A list containing the deleted files paths will be returned.
        """
        base_path = os.path.join(utils.DATABASES_PATH, name_id(self.id)+".csu_user_db")
        deleted = []
        paths = [ # ZODB usually creates these files for one database.
            base_path,
            base_path + ".index",
            base_path + ".lock",
            base_path + ".tmp"
        ]
        for path in paths:
            if os.path.exists(path):
                os.remove(path)
                deleted.append(path)
        return deleted
    def init_db(self, decrypt=True, password=None):
        """
        Initializes the user database and stores it in User.db.
        
        :param decrypt: whether to decrypt the database with *password*.
        :param password: encryption key; used only when decrypt is True.
        :return: self.db
        """
        self.db = db.init_user_db(name_id(self.id), decrypt=decrypt, key=password)
        return self.db
    def close_db(self):
        """
        Closes User.db.
        """
        return self.db.close()
class UserDataAppDB(Persistent):
    """
    Persistent record of a user's global data (id, name, password hash)
    as stored in the application database.
    """
    def __init__(self, id=None, create=False):
        # Strict type() check on purpose: it rejects bool and other int
        # subclasses, matching the original behavior.
        if id is not None and type(id) != int:
            raise exc.USR_BadIdType(id)
        self.__set_to_none()
        self.id = generate_new_id() if create else id
    def __set_to_none(self):
        # Blank out all persisted fields.
        self.id = None
        self.password_hash = None
        self.name = None
def generate_new_id():
    """
    Generates a new id that wasn't already in the app_db.
    If app_db is unavailable, the unavailable ids will
    be considered to be none.
    
    :return: a random 5-digit id (int) not already used by any user.
    """
    if db.app_db is not None:
        unavailable_ids = set(db.app_db.root['Users'].children.keys())
    else:
        unavailable_ids = set()
    while True:
        new_id = random.randrange(10000, 99999)
        # Bug fix: the previous code did unavailable_ids[new_id] (indexing
        # the list of existing ids by the candidate id value) and treated
        # the resulting IndexError as "available", so it could return ids
        # that were already taken. A membership test is what was intended.
        if new_id not in unavailable_ids:
            return new_id
def name_id(id):
    """
    Shorthand function for "user"+str(id).
    """
    prefix = "user"
    return prefix + str(id)
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QAD Quantum Aided Design plugin ok
classe per gestire il map tool in ambito del comando array
-------------------
begin : 2016-05-31
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from .. import qad_utils
from ..qad_variables import QadVariables
from ..qad_getpoint import QadGetPoint, QadGetPointSelectionModeEnum, QadGetPointDrawModeEnum
from ..qad_highlight import QadHighlight
from ..qad_dim import QadDimStyles, appendDimEntityIfNotExisting
from ..qad_entity import QadCacheEntitySetIterator, QadEntityTypeEnum
from .. import qad_array_fun
#===============================================================================
# Qad_array_maptool_ModeEnum class.
#===============================================================================
class Qad_array_maptool_ModeEnum():
   """Input modes of the ARRAY command map tool (plain int constants)."""
   # nothing is requested
   NONE = 0
   # the base point is requested
   ASK_FOR_BASE_PT = 1
   # the first point for the distance between columns is requested
   ASK_FOR_COLUMN_SPACE_FIRST_PT = 2
   # the first point for the cell size is requested
   ASK_FOR_1PT_CELL = 3
   # the second point for the cell size is requested
   ASK_FOR_2PT_CELL = 4
   # the first point for the distance between rows is requested
   ASK_FOR_ROW_SPACE_FIRST_PT = 5
#===============================================================================
# Qad_array_maptool class
#===============================================================================
class Qad_array_maptool(QadGetPoint):
   """
   Map tool for the ARRAY command: previews rectangular, path and polar
   arrays of the selected entities while the user picks points.
   """
   def __init__(self, plugIn):
      QadGetPoint.__init__(self, plugIn)
      self.cacheEntitySet = None
      self.basePt = None
      self.arrayType = None
      self.distanceBetweenRows = None
      self.distanceBetweenCols = None
      self.itemsRotation = None
      # rectangular array
      self.rectangleAngle = None
      self.rectangleCols = None
      self.rectangleRows = None
      self.firstPt = None
      # path array
      self.pathTangentDirection = None
      self.pathRows = None
      self.pathItemsNumber = None
      self.pathPolyline = None
      # polar array
      self.centerPt = None
      self.polarItemsNumber = None
      self.polarAngleBetween = None
      self.polarRows = None
      # rubber-band layer used to preview the array result
      self.__highlight = QadHighlight(self.canvas)
   def hidePointMapToolMarkers(self):
      QadGetPoint.hidePointMapToolMarkers(self)
      self.__highlight.hide()
   def showPointMapToolMarkers(self):
      QadGetPoint.showPointMapToolMarkers(self)
      self.__highlight.show()
   def clear(self):
      QadGetPoint.clear(self)
      self.__highlight.reset()
      self.mode = None
   #============================================================================
   # doRectangleArray
   #============================================================================
   def doRectangleArray(self):
      """Preview a rectangular array of the cached entities."""
      self.__highlight.reset()
      dimElaboratedList = [] # list of dimension entities already processed
      entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
      for entity in entityIterator:
         qadGeom = entity.getQadGeom().copy() # side effect: initializes the qad geometry info
         # check whether the entity belongs to a dimension style
         dimEntity = QadDimStyles.getDimEntity(entity)
         if dimEntity is not None:
            if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
               continue
            entity = dimEntity
         if qad_array_fun.arrayRectangleEntity(self.plugIn, entity, self.basePt, self.rectangleRows, self.rectangleCols, \
                                               self.distanceBetweenRows, self.distanceBetweenCols, self.rectangleAngle, self.itemsRotation,
                                               False, self.__highlight) == False:
            return
   #============================================================================
   # doPathArray
   #============================================================================
   def doPathArray(self):
      """Preview a path array of the cached entities."""
      self.__highlight.reset()
      dimElaboratedList = [] # list of dimension entities already processed
      entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
      for entity in entityIterator:
         qadGeom = entity.getQadGeom().copy() # side effect: initializes the qad geometry info
         # check whether the entity belongs to a dimension style
         dimEntity = QadDimStyles.getDimEntity(entity)
         if dimEntity is not None:
            if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
               continue
            entity = dimEntity
         if qad_array_fun.arrayPathEntity(self.plugIn, entity, self.basePt, self.pathRows, self.pathItemsNumber, \
                                          self.distanceBetweenRows, self.distanceBetweenCols, self.pathTangentDirection, self.itemsRotation, \
                                          self.pathPolyline, self.distanceFromStartPt, \
                                          False, self.__highlight) == False:
            return
   #============================================================================
   # doPolarArray
   #============================================================================
   def doPolarArray(self):
      """Preview a polar array of the cached entities."""
      self.__highlight.reset()
      dimElaboratedList = [] # list of dimension entities already processed
      entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
      for entity in entityIterator:
         qadGeom = entity.getQadGeom().copy() # side effect: initializes the qad geometry info
         # check whether the entity belongs to a dimension style
         dimEntity = QadDimStyles.getDimEntity(entity)
         if dimEntity is not None:
            if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
               continue
            entity = dimEntity
         if qad_array_fun.arrayPolarEntity(self.plugIn, entity, self.basePt, self.centerPt, self.polarItemsNumber, \
                                           self.polarAngleBetween, self.polarRows, self.distanceBetweenRows, self.itemsRotation, \
                                           False, self.__highlight) == False:
            return
   def canvasMoveEvent(self, event):
      QadGetPoint.canvasMoveEvent(self, event)
      # # once the base point is known the second point is requested
      # if self.mode == Qad_array_maptool_ModeEnum.BASE_PT_KNOWN_ASK_FOR_COPY_PT:
      #    self.setCopiedGeometries(self.tmpPoint)
   def activate(self):
      QadGetPoint.activate(self)
      self.__highlight.show()
   def deactivate(self):
      try: # needed because QGIS fires this event on shutdown even though the maptool object no longer exists!
         QadGetPoint.deactivate(self)
         self.__highlight.hide()
      except:
         pass
   def setMode(self, mode):
      """Configure selection/draw behavior for the given input mode."""
      self.mode = mode
      # nothing is requested
      if self.mode == Qad_array_maptool_ModeEnum.NONE:
         self.setSelectionMode(QadGetPointSelectionModeEnum.NONE)
         self.setDrawMode(QadGetPointDrawModeEnum.NONE)
      # the base point is requested
      elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_BASE_PT:
         self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
         self.setDrawMode(QadGetPointDrawModeEnum.NONE)
      # the first point for the distance between columns is requested
      elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_COLUMN_SPACE_FIRST_PT:
         self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
         self.setDrawMode(QadGetPointDrawModeEnum.NONE)
      # the first point for the cell size is requested
      elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_1PT_CELL:
         self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
         self.setDrawMode(QadGetPointDrawModeEnum.NONE)
      # the second point for the cell size is requested
      elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_2PT_CELL:
         self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
         self.setDrawMode(QadGetPointDrawModeEnum.ELASTIC_RECTANGLE)
         self.setStartPoint(self.firstPt)
      # the first point for the distance between rows is requested
      elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_ROW_SPACE_FIRST_PT:
         self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
         self.setDrawMode(QadGetPointDrawModeEnum.NONE)
      # the second point for the distance between columns is requested
      # elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_COLUMN_SPACE_SECOND_PT:
      #    self.setDrawMode(QadGetPointDrawModeEnum.ELASTIC_LINE)
      #    self.setStartPoint(self.firstPt)
| gam17/QAD | cmd/qad_array_maptool.py | Python | gpl-3.0 | 9,990 |
"""
Some useful utility functions missing from numpy/scipy.
Copyright 2016 Deepak Subburam
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"""
import numpy as np
def dir_clip(data, clips):
    """
    'Directional' clip. Dimension of data and clips must be the same. Values in data
    are clipped according to corresponding values in clips and returned as a new array.
    
    new_value = portion of value between 0 and clip.
    If clip is nan, new_value = value.
    """
    out = data.copy() if isinstance(data, np.ndarray) else np.array(data)
    valid = ~np.isnan(data) & ~np.isnan(clips)
    # Opposite signs: no portion of the value lies between 0 and the clip.
    opposite = valid & (np.sign(data) != np.sign(clips))
    out[opposite] = 0.0
    # Same sign but beyond the clip magnitude: cap at the clip value.
    beyond = ~opposite & (abs(data) > abs(clips))
    out[beyond] = clips[beyond]
    return out
def toward_zero(data, value):
    """
    Subtract value from positive values of data, and add value to negative values
    of data. Do not cross zero.
    """
    shrunk = data.copy()
    pos = data > 0
    neg = data < 0
    shrunk[pos] -= value
    shrunk[neg] += value
    # Entries that crossed zero are pinned at zero.
    shrunk[pos & (shrunk < 0)] = 0.0
    shrunk[neg & (shrunk > 0)] = 0.0
    return shrunk
def per_clip(data, caps):
    """
    Return values in data clipped between %le values of (caps[0], caps[1]).
    If caps is a scalar, only large values are capped.
    """
    if np.isscalar(caps):
        ceiling = np.percentile(data, caps)
        return np.fmin(data, ceiling)
    lo, hi = np.percentile(data, caps)
    return np.clip(data, lo, hi)
def scale2unit(data, eps=.1, dtype=None, soft_clip=99.99):
    """
    Scale values to between -1.0 and +1.0 strictly, and less strictly between
    -1.0 + <eps> or 1.0 - <eps>.
    
    More precisely, amplitude is scaled such that <large_value> is set to
    -1.0 + <eps> or 1.0 - <eps>, where <large_value> is
    if soft_clip is None:
        the max value of abs(data)
    else:
        soft_clip %le value of abs(data)
    
    Result is returned as type <dtype>, which defaults to
    if data.dtype is an integer type: float32
    else: data.dtype
    """
    if dtype is None:
        dtype = data.dtype
        # Integer input must be promoted so the scaled result keeps precision.
        if 'int' in str(dtype):
            dtype = np.float32
    denominator = (np.percentile(abs(data), soft_clip) if soft_clip
                   else np.max(abs(data)))
    data = data / denominator
    if eps:
        data = data * (1. - eps)
    if soft_clip:
        data = np.clip(data, -1.0, 1.0)
    return data.astype(dtype, copy=False)
def softmax(data, axis=None):
    """
    Scale exp(data) to sum to unit along axis.
    
    With axis=None the whole array sums to 1.
    """
    edata = np.exp(data)
    # keepdims makes the sum broadcast against edata for any axis, and also
    # fixes the previous implementation, which crashed for the default
    # axis=None (a scalar sum cannot be sliced with [:, None]).
    return edata / np.sum(edata, axis=axis, keepdims=True)
def sigmoid(data):
    """Sigmoid activation function."""
    denominator = 1 + np.exp(-data)
    return 1 / denominator
def logit(data, eps=1e-8):
    """Inverse of the sigmoid function (eps guards against division by zero)."""
    inner = 1 / (data + eps) - 1 + eps
    return -np.log(inner)
def elu(data, alpha=1.0, copy=True):
    """Exponential LU activation function (in place when copy=False)."""
    result = data.copy() if copy else data
    negatives = data < 0
    result[negatives] = alpha * (np.exp(data[negatives]) - 1.0)
    return result
def celu(data, alpha, copy=True):
    """Continuously differentiable exponential LU activation function."""
    result = data.copy() if copy else data
    negatives = data < 0
    result[negatives] = alpha * (np.exp(data[negatives] / alpha) - 1.0)
    return result
def ielu(data, copy=True, eps=1e-20):
    """Inverse exponential LU activation function (eps avoids log(0))."""
    result = data.copy() if copy else data
    negatives = data < 0
    result[negatives] = np.log(data[negatives] + 1.0 + eps)
    return result
def llu(data, copy=True):
    """
    Linear-log activation function; linear inside of +/-1.0,
    log outside of it.
    """
    result = data.copy() if copy else data
    high = data > 1.0
    result[high] = np.log(data[high]) + 1.0
    low = data < -1.0
    result[low] = -np.log(-data[low]) - 1.0
    return result
def illu(data, copy=True):
    """Inverse of llu."""
    result = data.copy() if copy else data
    high = data > 1.0
    result[high] = np.exp(data[high] - 1.0)
    low = data < -1.0
    result[low] = -np.exp(-data[low] - 1.0)
    return result
def sroot(data, power=0.5):
    """
    'Signed' square-root (default power = 0.5):
    raise abs(data) to power, then multiply by sign(data).
    """
    magnitude = np.abs(data) ** power
    return np.sign(data) * magnitude
| Fenugreek/tamarind | functions.py | Python | gpl-3.0 | 4,617 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Desarrollado por rNet Soluciones
# Jefe de Proyecto: Ing. Ulises Tlatoani Vidal Rieder
# Desarrollador: Ing. Salvador Daniel Pelayo Gómez.
# Analista: Lic. David Padilla Bobadilla
#
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_cp
| daniel2101/fleosa | fleosa/report/__init__.py | Python | gpl-3.0 | 1,219 |
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE, SQLITE_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
class Pheno2SQLTest(DBTest):
    @unittest.skip('sqlite being removed')
    def test_sqlite_default_values(self):
        """Load example01.csv into SQLite with default settings and verify
        the created table, its columns and the row values."""
        # Prepare
        csv_file = get_repository_path('pheno2sql/example01.csv')
        db_engine = SQLITE_ENGINE
        p2sql = Pheno2SQL(csv_file, db_engine)
        # Run
        p2sql.load_data()
        # Validate
        assert p2sql.db_type == 'sqlite'
        ## Check table exists
        tmp = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
        assert not tmp.empty
        ## Check columns are correct
        tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
        expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
        assert len(tmp.columns) == len(expected_columns)
        assert all(x in expected_columns for x in tmp.columns)
        ## Check data is correct (values come from the example01.csv fixture)
        tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
        assert not tmp.empty
        assert tmp.shape[0] == 2
        assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
        assert tmp.loc[1, 'c21_1_0'] == 'No response'
        assert tmp.loc[1, 'c21_2_0'] == 'Yes'
        assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
        assert int(tmp.loc[1, 'c34_0_0']) == 21
        assert int(tmp.loc[1, 'c46_0_0']) == -9
        assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
        assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
        assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
        assert pd.isnull(tmp.loc[2, 'c21_1_0'])
        assert tmp.loc[2, 'c21_2_0'] == 'No'
        assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
        assert int(tmp.loc[2, 'c34_0_0']) == 12
        assert int(tmp.loc[2, 'c46_0_0']) == -2
        assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
        assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
    def test_postgresql_default_values(self):
        """Load example01.csv into PostgreSQL with default settings and verify
        the created table, its columns and the row values.
        
        Note: unlike the SQLite variant, date columns come back as datetime
        objects, hence the strftime comparisons below."""
        # Prepare
        csv_file = get_repository_path('pheno2sql/example01.csv')
        db_engine = POSTGRESQL_ENGINE
        p2sql = Pheno2SQL(csv_file, db_engine)
        # Run
        p2sql.load_data()
        # Validate
        assert p2sql.db_type == 'postgresql'
        ## Check table exists
        table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
        assert table.iloc[0, 0]
        ## Check columns are correct
        tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
        expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
        assert len(tmp.columns) == len(expected_columns)
        assert all(x in expected_columns for x in tmp.columns)
        ## Check data is correct (values come from the example01.csv fixture)
        tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
        assert not tmp.empty
        assert tmp.shape[0] == 2
        assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
        assert tmp.loc[1, 'c21_1_0'] == 'No response'
        assert tmp.loc[1, 'c21_2_0'] == 'Yes'
        assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
        assert int(tmp.loc[1, 'c34_0_0']) == 21
        assert int(tmp.loc[1, 'c46_0_0']) == -9
        assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
        assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
        assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
        assert pd.isnull(tmp.loc[2, 'c21_1_0'])
        assert tmp.loc[2, 'c21_2_0'] == 'No'
        assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
        assert int(tmp.loc[2, 'c34_0_0']) == 12
        assert int(tmp.loc[2, 'c46_0_0']) == -2
        assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
        assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
    def test_exit(self):
        """Use Pheno2SQL as a context manager and verify that the data is
        loaded and that its temporary files are removed on exit."""
        # Prepare
        csv_file = get_repository_path('pheno2sql/example01.csv')
        db_engine = POSTGRESQL_ENGINE
        temp_dir = tempfile.mkdtemp()
        # Run
        with Pheno2SQL(csv_file, db_engine, tmpdir=temp_dir) as p2sql:
            p2sql.load_data()
        # Validate
        ## Check table exists
        table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
        assert table.iloc[0, 0]
        ## Check columns are correct
        tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
        expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
        assert len(tmp.columns) == len(expected_columns)
        assert all(x in expected_columns for x in tmp.columns)
        ## Check data is correct
        tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
        assert not tmp.empty
        assert tmp.shape[0] == 2
        ## Check that temporary files were deleted on context-manager exit
        assert len(os.listdir(temp_dir)) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_less_columns_per_table(self):
    """n_columns_per_table=3 must spread the 8 fields over three SQLite tables."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = SQLITE_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'sqlite'

    ## Check tables exist
    for table_name in ('ukb_pheno_0_00', 'ukb_pheno_0_01', 'ukb_pheno_0_02'):
        found = pd.read_sql(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format(table_name),
            create_engine(db_engine))
        assert not found.empty

    ## Check columns are correct (each table gets 'eid' plus at most 3 data columns)
    expected_layout = [
        ('ukb_pheno_0_00', ["eid", "c21_0_0", "c21_1_0", "c21_2_0"]),
        ('ukb_pheno_0_01', ["eid", "c31_0_0", "c34_0_0", "c46_0_0"]),
        ('ukb_pheno_0_02', ["eid", "c47_0_0", "c48_0_0"]),
    ]
    for table_name, expected_columns in expected_layout:
        df = pd.read_sql('select * from {}'.format(table_name), create_engine(db_engine))
        assert len(df.columns) == len(expected_columns)
        assert all(column in expected_columns for column in df.columns)

    ## Check data is correct (SQLite stores dates as plain strings)
    df = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 2
    assert df.loc[1, 'c21_0_0'] == 'Option number 1'
    assert df.loc[1, 'c21_1_0'] == 'No response'
    assert df.loc[1, 'c21_2_0'] == 'Yes'
    assert df.loc[2, 'c21_0_0'] == 'Option number 2'
    assert pd.isnull(df.loc[2, 'c21_1_0'])
    assert df.loc[2, 'c21_2_0'] == 'No'

    df = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 2
    assert df.loc[1, 'c31_0_0'] == '2012-01-05'
    assert int(df.loc[1, 'c34_0_0']) == 21
    assert int(df.loc[1, 'c46_0_0']) == -9
    assert df.loc[2, 'c31_0_0'] == '2015-12-30'
    assert int(df.loc[2, 'c34_0_0']) == 12
    assert int(df.loc[2, 'c46_0_0']) == -2

    df = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 2
    assert df.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert df.loc[1, 'c48_0_0'] == '2011-08-14'
    assert df.loc[2, 'c47_0_0'].round(5) == -0.55461
    assert df.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_less_columns_per_table(self):
    """n_columns_per_table=3 must spread the 8 fields over three PostgreSQL tables."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    ## Check tables exist
    for table_name in ('ukb_pheno_0_00', 'ukb_pheno_0_01', 'ukb_pheno_0_02'):
        found = pd.read_sql(
            "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format(table_name),
            create_engine(db_engine))
        assert found.iloc[0, 0]

    ## Check columns are correct (each table gets 'eid' plus at most 3 data columns)
    expected_layout = [
        ('ukb_pheno_0_00', ["eid", "c21_0_0", "c21_1_0", "c21_2_0"]),
        ('ukb_pheno_0_01', ["eid", "c31_0_0", "c34_0_0", "c46_0_0"]),
        ('ukb_pheno_0_02', ["eid", "c47_0_0", "c48_0_0"]),
    ]
    for table_name, expected_columns in expected_layout:
        df = pd.read_sql('select * from {}'.format(table_name), create_engine(db_engine))
        assert len(df.columns) == len(expected_columns)
        assert all(column in expected_columns for column in df.columns)

    ## Check data is correct (PostgreSQL returns real date objects, hence strftime)
    df = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 2
    assert df.loc[1, 'c21_0_0'] == 'Option number 1'
    assert df.loc[1, 'c21_1_0'] == 'No response'
    assert df.loc[1, 'c21_2_0'] == 'Yes'
    assert df.loc[2, 'c21_0_0'] == 'Option number 2'
    assert pd.isnull(df.loc[2, 'c21_1_0'])
    assert df.loc[2, 'c21_2_0'] == 'No'

    df = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 2
    assert df.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
    assert int(df.loc[1, 'c34_0_0']) == 21
    assert int(df.loc[1, 'c46_0_0']) == -9
    assert df.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
    assert int(df.loc[2, 'c34_0_0']) == 12
    assert int(df.loc[2, 'c46_0_0']) == -2

    df = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 2
    assert df.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert df.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
    assert df.loc[2, 'c47_0_0'].round(5) == -0.55461
    assert df.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_custom_tmpdir(self):
    """With delete_temp_csv=False the temporary CSV files stay in the custom
    tmpdir while the context manager is open, and are cleaned up on exit."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = POSTGRESQL_ENGINE
    custom_tmpdir = '/tmp/custom/directory/here'

    with Pheno2SQL(csv_file, db_engine, tmpdir=custom_tmpdir, delete_temp_csv=False) as p2sql:
        # Run
        p2sql.load_data()

        # Validate
        ## Check table exists
        found = pd.read_sql(
            "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'),
            create_engine(db_engine))
        assert found.iloc[0, 0]

        ## Check columns are correct
        df = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
        expected_columns = ["eid", "c21_0_0", "c21_1_0", "c21_2_0", "c31_0_0", "c34_0_0", "c46_0_0", "c47_0_0", "c48_0_0"]
        assert len(df.columns) == len(expected_columns)
        assert all(column in expected_columns for column in df.columns)

        ## Check data is correct
        df = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
        assert not df.empty
        assert df.shape[0] == 2

        ## Check that the temporary files are still there (delete_temp_csv=False)
        assert len(os.listdir(custom_tmpdir)) > 0

    ## Check that the temporary directory is clean after the context manager exits
    assert len(os.listdir(custom_tmpdir)) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_auxiliary_table_is_created(self):
    """Loading into SQLite must also create a 'fields' table mapping each
    column name to the data table that stores it."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = SQLITE_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'sqlite'

    ## Check tables exist
    for table_name in ('ukb_pheno_0_00', 'ukb_pheno_0_01', 'ukb_pheno_0_02'):
        found = pd.read_sql(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format(table_name),
            create_engine(db_engine))
        assert not found.empty

    ## Check columns are correct
    expected_layout = [
        ('ukb_pheno_0_00', ["eid", "c21_0_0", "c21_1_0", "c21_2_0"]),
        ('ukb_pheno_0_01', ["eid", "c31_0_0", "c34_0_0", "c46_0_0"]),
        ('ukb_pheno_0_02', ["eid", "c47_0_0", "c48_0_0"]),
    ]
    for table_name, expected_columns in expected_layout:
        df = pd.read_sql('select * from {}'.format(table_name), create_engine(db_engine))
        assert len(df.columns) == len(expected_columns)
        assert all(column in expected_columns for column in df.columns)

    ## Check auxiliary table existance
    found = pd.read_sql(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('fields'),
        create_engine(db_engine))
    assert not found.empty

    ## Check columns are correct (at minimum the column -> table mapping)
    df = pd.read_sql('select * from fields', create_engine(db_engine))
    expected_columns = ["column_name", "table_name"]
    assert len(df.columns) >= len(expected_columns)
    assert all(column in df.columns for column in expected_columns)

    ## Check data is correct
    df = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
    assert not df.empty
    assert df.shape[0] == 8
    expected_mapping = [
        ('c21_0_0', 'ukb_pheno_0_00'),
        ('c21_1_0', 'ukb_pheno_0_00'),
        ('c21_2_0', 'ukb_pheno_0_00'),
        ('c31_0_0', 'ukb_pheno_0_01'),
        ('c34_0_0', 'ukb_pheno_0_01'),
        ('c46_0_0', 'ukb_pheno_0_01'),
        ('c47_0_0', 'ukb_pheno_0_02'),
        ('c48_0_0', 'ukb_pheno_0_02'),
    ]
    for column_name, table_name in expected_mapping:
        assert df.loc[column_name, 'table_name'] == table_name
def test_postgresql_auxiliary_table_is_created_and_has_minimum_data_required(self):
    """Loading into PostgreSQL must also create a 'fields' table that maps, at
    minimum, each column name to the data table that stores it."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    ## Check tables exist
    for table_name in ('ukb_pheno_0_00', 'ukb_pheno_0_01', 'ukb_pheno_0_02'):
        found = pd.read_sql(
            "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format(table_name),
            create_engine(db_engine))
        assert found.iloc[0, 0]

    ## Check columns are correct
    expected_layout = [
        ('ukb_pheno_0_00', ["eid", "c21_0_0", "c21_1_0", "c21_2_0"]),
        ('ukb_pheno_0_01', ["eid", "c31_0_0", "c34_0_0", "c46_0_0"]),
        ('ukb_pheno_0_02', ["eid", "c47_0_0", "c48_0_0"]),
    ]
    for table_name, expected_columns in expected_layout:
        df = pd.read_sql('select * from {}'.format(table_name), create_engine(db_engine))
        assert len(df.columns) == len(expected_columns)
        assert all(column in expected_columns for column in df.columns)

    ## Check auxiliary table existance
    found = pd.read_sql(
        "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'),
        create_engine(db_engine))
    assert found.iloc[0, 0]

    ## Check columns are correct (at minimum the column -> table mapping)
    df = pd.read_sql('select * from fields', create_engine(db_engine))
    expected_columns = ["column_name", "table_name"]
    assert len(df.columns) >= len(expected_columns)
    assert all(column in df.columns for column in expected_columns)

    ## Check data is correct
    df = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
    assert not df.empty
    assert df.shape[0] == 8
    expected_mapping = [
        ('c21_0_0', 'ukb_pheno_0_00'),
        ('c21_1_0', 'ukb_pheno_0_00'),
        ('c21_2_0', 'ukb_pheno_0_00'),
        ('c31_0_0', 'ukb_pheno_0_01'),
        ('c34_0_0', 'ukb_pheno_0_01'),
        ('c46_0_0', 'ukb_pheno_0_01'),
        ('c47_0_0', 'ukb_pheno_0_02'),
        ('c48_0_0', 'ukb_pheno_0_02'),
    ]
    for column_name, table_name in expected_mapping:
        assert df.loc[column_name, 'table_name'] == table_name
def test_postgresql_auxiliary_table_with_more_information(self):
    """The 'fields' auxiliary table must carry full per-field metadata:
    field_id, instance, array index, coding, hosting table, type, description."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    ## Check tables exist
    for table_name in ('ukb_pheno_0_00', 'ukb_pheno_0_01', 'ukb_pheno_0_02'):
        found = pd.read_sql(
            "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format(table_name),
            create_engine(db_engine))
        assert found.iloc[0, 0]

    ## Check auxiliary table existance
    found = pd.read_sql(
        "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'),
        create_engine(db_engine))
    assert found.iloc[0, 0]

    ## Check columns are correct
    df = pd.read_sql('select * from fields', create_engine(db_engine))
    expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
    assert len(df.columns) == len(expected_columns), len(df.columns)
    assert all(column in expected_columns for column in df.columns)

    ## Check data is correct
    df = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
    assert not df.empty
    assert df.shape[0] == 8

    # (field_id, inst, arr, coding, table_name, type, description) per column;
    # coding=None means the field has no coding and the cell must be null.
    expected_fields = [
        ('c21_0_0', '21', 0, 0, 100261, 'ukb_pheno_0_00', 'Categorical (single)', 'An string value'),
        ('c21_1_0', '21', 1, 0, 100261, 'ukb_pheno_0_00', 'Categorical (single)', 'An string value'),
        ('c21_2_0', '21', 2, 0, 100261, 'ukb_pheno_0_00', 'Categorical (single)', 'An string value'),
        ('c31_0_0', '31', 0, 0, None, 'ukb_pheno_0_01', 'Date', 'A date'),
        ('c34_0_0', '34', 0, 0, 9, 'ukb_pheno_0_01', 'Integer', 'Some integer'),
        ('c46_0_0', '46', 0, 0, None, 'ukb_pheno_0_01', 'Integer', 'Some another integer'),
        ('c47_0_0', '47', 0, 0, None, 'ukb_pheno_0_02', 'Continuous', 'Some continuous value'),
        ('c48_0_0', '48', 0, 0, None, 'ukb_pheno_0_02', 'Time', 'Some time'),
    ]
    for column_name, field_id, inst, arr, coding, table_name, field_type, description in expected_fields:
        assert df.loc[column_name, 'field_id'] == field_id
        assert df.loc[column_name, 'inst'] == inst
        assert df.loc[column_name, 'arr'] == arr
        if coding is None:
            assert pd.isnull(df.loc[column_name, 'coding'])
        else:
            assert df.loc[column_name, 'coding'] == coding
        assert df.loc[column_name, 'table_name'] == table_name
        assert df.loc[column_name, 'type'] == field_type
        assert df.loc[column_name, 'description'] == description
def test_postgresql_auxiliary_table_check_types(self):
    """The 'fields' auxiliary table must use text for names/descriptions and
    bigint for the numeric metadata columns."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    ## Check auxiliary table existance
    found = pd.read_sql(
        "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'),
        create_engine(db_engine))
    assert found.iloc[0, 0]

    ## Check columns are correct
    df = pd.read_sql('select * from fields', create_engine(db_engine))
    expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
    assert len(df.columns) == len(expected_columns), len(df.columns)
    assert all(column in expected_columns for column in df.columns)

    ## Check declared SQL types via the information schema
    sql_types = """
        select column_name, data_type
        from information_schema.columns
        where table_name = 'fields';
    """
    df = pd.read_sql(sql_types, create_engine(db_engine), index_col='column_name')
    assert not df.empty
    assert df.shape[0] == 8

    expected_types = [
        ('field_id', 'text'),
        ('inst', 'bigint'),
        ('arr', 'bigint'),
        ('coding', 'bigint'),
        ('table_name', 'text'),
        ('type', 'text'),
        ('description', 'text'),
    ]
    for column_name, data_type in expected_types:
        assert df.loc[column_name, 'data_type'] == data_type
def test_postgresql_auxiliary_table_constraints(self):
    """The 'fields' auxiliary table must have a primary key on column_name and
    indexes on the columns used for lookups."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example01.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    ## Check auxiliary table existance
    found = pd.read_sql(
        "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'),
        create_engine(db_engine))
    assert found.iloc[0, 0]

    # primary key on 'column_name'
    constraint_sql = self._get_table_contrains('fields', column_query='column_name', relationship_query='pk_%%')
    constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
    assert constraints_results is not None
    assert not constraints_results.empty

    # indexes on the searchable metadata columns
    constraint_sql = self._get_table_contrains('fields', relationship_query='ix_%%')
    constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
    assert constraints_results is not None
    assert not constraints_results.empty
    indexed_columns = constraints_results['column_name'].tolist()
    assert len(indexed_columns) == 6
    for expected_column in ('arr', 'field_id', 'inst', 'table_name', 'type', 'coding'):
        assert expected_column in indexed_columns
def test_postgresql_two_csv_files(self):
    """Loading two CSV files must create one table group per file
    (ukb_pheno_0_* for the first, ukb_pheno_1_* for the second)."""
    # Prepare
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL((csv01, csv02), db_engine)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    ## Check tables exist
    for table_name in ('ukb_pheno_0_00', 'ukb_pheno_1_00'):
        found = pd.read_sql(
            "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format(table_name),
            create_engine(db_engine))
        assert found.iloc[0, 0]

    ## Check columns are correct
    expected_layout = [
        ('ukb_pheno_0_00', ["eid", "c21_0_0", "c21_1_0", "c21_2_0", "c31_0_0", "c34_0_0", "c46_0_0", "c47_0_0", "c48_0_0"]),
        ('ukb_pheno_1_00', ["eid", "c100_0_0", "c100_1_0", "c100_2_0", "c110_0_0", "c120_0_0", "c130_0_0", "c140_0_0", "c150_0_0"]),
    ]
    for table_name, expected_columns in expected_layout:
        df = pd.read_sql('select * from {}'.format(table_name), create_engine(db_engine))
        assert len(df.columns) == len(expected_columns)
        assert all(column in expected_columns for column in df.columns)

    ## Check data is correct: first file -> 5 subjects in ukb_pheno_0_00
    df = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 5
    assert df.loc[1, 'c21_0_0'] == 'Option number 1'
    assert df.loc[1, 'c21_1_0'] == 'No response'
    assert df.loc[1, 'c21_2_0'] == 'Yes'
    assert df.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
    assert int(df.loc[1, 'c34_0_0']) == -33
    assert int(df.loc[1, 'c46_0_0']) == -9
    assert df.loc[1, 'c47_0_0'].round(5) == 41.55312
    assert df.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'

    assert df.loc[5, 'c21_0_0'] == 'Option number 5'
    assert df.loc[5, 'c21_1_0'] == 'Maybe'
    assert df.loc[5, 'c21_2_0'] == 'Probably'
    assert pd.isnull(df.loc[5, 'c31_0_0'])
    assert int(df.loc[5, 'c34_0_0']) == -4
    assert int(df.loc[5, 'c46_0_0']) == 1
    assert pd.isnull(df.loc[5, 'c47_0_0'])
    assert df.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'

    ## Second file -> 3 subjects in ukb_pheno_1_00
    df = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine), index_col='eid')
    assert not df.empty
    assert df.shape[0] == 3
    assert int(df.loc[1, 'c100_0_0']) == -9
    assert int(df.loc[1, 'c100_1_0']) == 3
    assert pd.isnull(df.loc[1, 'c100_2_0'])
    assert df.loc[1, 'c110_0_0'].round(5) == 42.55312
    assert int(df.loc[1, 'c120_0_0']) == -33
    assert df.loc[1, 'c130_0_0'] == 'Option number 1'
    assert df.loc[1, 'c140_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
    assert df.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'

    assert pd.isnull(df.loc[3, 'c100_0_0'])
    assert int(df.loc[3, 'c100_1_0']) == -4
    assert int(df.loc[3, 'c100_2_0']) == -10
    assert df.loc[3, 'c110_0_0'].round(5) == -35.31471
    assert int(df.loc[3, 'c120_0_0']) == 0
    assert df.loc[3, 'c130_0_0'] == 'Option number 3'
    assert df.loc[3, 'c140_0_0'].strftime('%Y-%m-%d') == '1997-04-15'
    assert pd.isnull(df.loc[3, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_table(self):
    """query() over columns all hosted in one SQLite table must return one
    DataFrame chunk indexed by eid."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = SQLITE_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
    p2sql.load_data()

    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
    query_result = next(p2sql.query(columns))

    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 4
    assert all(eid in query_result.index for eid in range(1, 4 + 1))

    assert len(query_result.columns) == len(columns)
    assert all(column in columns for column in query_result.columns)

    assert not query_result.empty
    assert query_result.shape[0] == 4
    for eid, option in enumerate(['Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'], start=1):
        assert query_result.loc[eid, 'c21_0_0'] == option

    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])

    # SQLite stores dates as plain strings
    for eid, date_value in enumerate(['2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'], start=1):
        assert query_result.loc[eid, 'c48_0_0'] == date_value
def test_postgresql_query_single_table(self):
    """query() over columns all hosted in one PostgreSQL table must return one
    DataFrame chunk indexed by eid."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
    p2sql.load_data()

    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
    query_result = next(p2sql.query(columns))

    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 4
    assert all(eid in query_result.index for eid in range(1, 4 + 1))

    assert len(query_result.columns) == len(columns)
    assert all(column in columns for column in query_result.columns)

    assert not query_result.empty
    assert query_result.shape[0] == 4
    for eid, option in enumerate(['Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'], start=1):
        assert query_result.loc[eid, 'c21_0_0'] == option

    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])

    # PostgreSQL returns real date objects, hence strftime
    for eid, date_value in enumerate(['2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'], start=1):
        assert query_result.loc[eid, 'c48_0_0'].strftime('%Y-%m-%d') == date_value
def test_postgresql_two_csv_files_query_single_table(self):
    """With two source CSV files loaded, a single-table query must still merge
    all subjects (5 in total) into one result."""
    # Prepare
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
    p2sql.load_data()

    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
    query_result = next(p2sql.query(columns))

    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 5
    assert all(eid in query_result.index for eid in range(1, 5 + 1))

    assert len(query_result.columns) == len(columns)
    assert all(column in columns for column in query_result.columns)

    assert not query_result.empty
    assert query_result.shape[0] == 5
    for eid, option in enumerate(['Option number 1', 'Option number 2', 'Option number 3',
                                  'Option number 4', 'Option number 5'], start=1):
        assert query_result.loc[eid, 'c21_0_0'] == option

    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])
    assert query_result.loc[5, 'c21_2_0'] == 'Probably'

    for eid, date_value in enumerate(['2010-07-14', '2017-11-30', '2020-01-01', '1990-02-15', '1999-10-11'], start=1):
        assert query_result.loc[eid, 'c48_0_0'].strftime('%Y-%m-%d') == date_value
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_tables(self):
    """query() spanning several SQLite tables (n_columns_per_table=2) must
    join them back into one DataFrame chunk indexed by eid.

    Note: RIGHT and FULL OUTER JOINs are not currently supported by SQLite.

    Fix: query() yields DataFrame chunks (every other query test consumes it
    with next()), so the first chunk is taken with next() here as well; the
    original code asserted against the generator object itself, which would
    fail on `.index` if this skipped test were ever re-enabled.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = SQLITE_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
    p2sql.load_data()

    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
    query_result = next(p2sql.query(columns))

    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 4
    assert all(x in query_result.index for x in range(1, 4 + 1))

    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)

    assert not query_result.empty
    assert query_result.shape[0] == 4
    assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
    assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
    assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
    assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'

    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])

    # SQLite stores dates as plain strings
    assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
    assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
    assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
    assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_multiple_tables(self):
    """query() spanning several PostgreSQL tables (n_columns_per_table=2) must
    join them back into one DataFrame chunk indexed by eid."""
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
    p2sql.load_data()

    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
    query_result = next(p2sql.query(columns))

    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 4
    assert all(eid in query_result.index for eid in range(1, 4 + 1))

    assert len(query_result.columns) == len(columns)
    assert all(column in columns for column in query_result.columns)

    assert not query_result.empty
    assert query_result.shape[0] == 4
    for eid, option in enumerate(['Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'], start=1):
        assert query_result.loc[eid, 'c21_0_0'] == option

    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])

    for eid, date_value in enumerate(['2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'], start=1):
        assert query_result.loc[eid, 'c48_0_0'].strftime('%Y-%m-%d') == date_value
def test_postgresql_two_csv_files_query_multiple_tables(self):
    """Querying columns that live in tables coming from two different CSV
    files must join them on eid, with nulls for subjects missing in a file."""
    # Prepare
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    db_engine = POSTGRESQL_ENGINE

    p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
    p2sql.load_data()

    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
    query_result = next(p2sql.query(columns))

    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 5
    assert all(eid in query_result.index for eid in range(1, 5 + 1))

    assert len(query_result.columns) == len(columns)
    assert all(column in columns for column in query_result.columns)

    assert not query_result.empty
    assert query_result.shape[0] == 5
    for eid, option in enumerate(['Option number 1', 'Option number 2', 'Option number 3',
                                  'Option number 4', 'Option number 5'], start=1):
        assert query_result.loc[eid, 'c21_0_0'] == option

    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])
    assert query_result.loc[5, 'c21_2_0'] == 'Probably'

    # c110_0_0 only exists for subjects present in the second file
    assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
    assert pd.isnull(query_result.loc[2, 'c110_0_0'])
    assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
    assert pd.isnull(query_result.loc[4, 'c110_0_0'])
    assert pd.isnull(query_result.loc[5, 'c110_0_0'])

    assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
    assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
    assert pd.isnull(query_result.loc[3, 'c150_0_0'])
    assert pd.isnull(query_result.loc[4, 'c150_0_0'])
    assert pd.isnull(query_result.loc[5, 'c150_0_0'])
def test_postgresql_two_csv_files_flipped_query_multiple_tables(self):
    """Same as test_postgresql_two_csv_files_query_multiple_tables but with
    the two CSV files passed in reverse order.

    The query results must be identical regardless of file order.
    """
    # Prepare
    # In this test the files are just flipped
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    db_engine = POSTGRESQL_ENGINE
    # note (csv02, csv01): reversed order relative to the sibling test
    p2sql = Pheno2SQL((csv02, csv01), db_engine, n_columns_per_table=999999)
    p2sql.load_data()
    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
    query_result = next(p2sql.query(columns))
    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 5
    assert all(x in query_result.index for x in range(1, 5 + 1))
    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)
    assert not query_result.empty
    assert query_result.shape[0] == 5
    assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
    assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
    assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
    assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
    assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])
    assert query_result.loc[5, 'c21_2_0'] == 'Probably'
    assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
    assert pd.isnull(query_result.loc[2, 'c110_0_0'])
    assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
    assert pd.isnull(query_result.loc[4, 'c110_0_0'])
    assert pd.isnull(query_result.loc[5, 'c110_0_0'])
    assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
    assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
    assert pd.isnull(query_result.loc[3, 'c150_0_0'])
    assert pd.isnull(query_result.loc[4, 'c150_0_0'])
    assert pd.isnull(query_result.loc[5, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_custom_columns(self):
    """Placeholder for the SQLite custom-columns query test.

    Permanently skipped: SQLite is very limited when selecting variables,
    renaming, doing math operations, etc., and SQLite support is being removed.
    """
    pass
def test_postgresql_query_custom_columns(self):
    """Query with a custom SQL expression column (c47_0_0 squared, aliased).

    Verifies that an arbitrary SQL expression with an 'as' alias is accepted
    in the column list and that its computed values match the raw column.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
    p2sql.load_data()
    # Run
    # '^' is PostgreSQL's exponentiation operator
    columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', '(c47_0_0 ^ 2.0) as c47_squared']
    query_result = next(p2sql.query(columns))
    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 4
    assert all(x in query_result.index for x in range(1, 4 + 1))
    assert len(query_result.columns) == len(columns)
    # the expression column comes back under its alias
    assert all(x in ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c47_squared'] for x in query_result.columns)
    assert not query_result.empty
    assert query_result.shape[0] == 4
    assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
    assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
    assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
    assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])
    assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
    assert query_result.loc[3, 'c47_0_0'].round(5) == -5.32471
    assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
    assert query_result.loc[1, 'c47_squared'].round(5) == round(45.55412 ** 2, 5)
    assert query_result.loc[2, 'c47_squared'].round(5) == round((-0.55461) ** 2, 5)
    assert query_result.loc[3, 'c47_squared'].round(5) == round((-5.32471) ** 2, 5)
    assert query_result.loc[4, 'c47_squared'].round(5) == round(55.19832 ** 2, 5)
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_filter(self):
    """Query with a single filter expression on SQLite (skipped).

    Skipped because SQLite support is being removed; also, RIGHT and FULL
    OUTER JOINs are not currently supported by SQLite.
    """
    # RIGHT and FULL OUTER JOINs are not currently supported
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = SQLITE_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
    p2sql.load_data()
    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
    # renamed from 'filter' to avoid shadowing the builtin
    filters = ['c47_0_0 > 0']
    query_result = p2sql.query(columns, filters)
    # Validate: only eids 1 and 4 have positive c47_0_0
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 2
    assert all(x in query_result.index for x in (1, 4))
    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)
    assert not query_result.empty
    assert query_result.shape[0] == 2
    assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
    assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])
    assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
def test_postgresql_query_single_filter(self):
    """Query with a single row filter ('c47_0_0 > 0') on PostgreSQL.

    Only eids 1 and 4 satisfy the filter; their values must be returned
    unchanged and all other rows excluded.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
    p2sql.load_data()
    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
    # renamed from 'filter' to avoid shadowing the builtin
    filters = ['c47_0_0 > 0']
    query_result = next(p2sql.query(columns, filterings=filters))
    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 2
    assert all(x in query_result.index for x in (1, 4))
    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)
    assert not query_result.empty
    assert query_result.shape[0] == 2
    assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
    assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert pd.isnull(query_result.loc[4, 'c21_2_0'])
    assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_and_filter(self):
    """Query with multiple AND-combined filters on SQLite (skipped).

    Skipped because SQLite support is being removed; RIGHT and FULL OUTER
    JOINs are not currently supported by SQLite.
    """
    # 'RIGHT and FULL OUTER JOINs are not currently supported'
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = SQLITE_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
    p2sql.load_data()
    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
    # renamed from 'filter' to avoid shadowing the builtin
    filters = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
    query_result = p2sql.query(columns, filters)
    # Validate: only eids 1 and 2 satisfy both conditions
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 2
    assert all(x in query_result.index for x in (1, 2))
    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)
    assert not query_result.empty
    assert query_result.shape[0] == 2
    assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
    assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
def test_postgresql_query_multiple_and_filter(self):
    """Query with multiple filters (implicitly ANDed) on PostgreSQL.

    The two conditions (date after 2011-01-01 and non-empty c21_2_0) select
    only eids 1 and 2; all column values for those rows are checked.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
    p2sql.load_data()
    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
    # renamed from 'filter' to avoid shadowing the builtin
    filters = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
    query_result = next(p2sql.query(columns, filterings=filters))
    # Validate
    assert query_result is not None
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 2
    assert all(x in query_result.index for x in (1, 2))
    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)
    assert not query_result.empty
    assert query_result.shape[0] == 2
    assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
    assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
    assert query_result.loc[1, 'c21_2_0'] == 'Yes'
    assert query_result.loc[2, 'c21_2_0'] == 'No'
    assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
    assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
    assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
@unittest.skip('sqlite being removed')
def test_sqlite_float_is_empty(self):
    """Empty float CSV cells load as NULL in SQLite (skipped: sqlite being removed).

    Loads example03.csv split into three tables and checks values directly
    in each table, including that a missing float (eid 3, c47_0_0) is NULL.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example03.csv')
    db_engine = SQLITE_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'sqlite'
    ## Check data is correct
    tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
    assert tmp.loc[1, 'c21_1_0'] == 'No response'
    assert tmp.loc[1, 'c21_2_0'] == 'Yes'
    assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
    assert pd.isnull(tmp.loc[2, 'c21_1_0'])
    assert tmp.loc[2, 'c21_2_0'] == 'No'
    assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
    assert tmp.loc[3, 'c21_1_0'] == 'Of course'
    assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(tmp.loc[4, 'c21_2_0'])
    tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    # SQLite stores dates as plain strings, so compare to string literals
    assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
    assert int(tmp.loc[1, 'c34_0_0']) == 21
    assert int(tmp.loc[1, 'c46_0_0']) == -9
    assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
    assert int(tmp.loc[2, 'c34_0_0']) == 12
    assert int(tmp.loc[2, 'c46_0_0']) == -2
    assert tmp.loc[3, 'c31_0_0'] == '2007-03-19'
    assert int(tmp.loc[3, 'c34_0_0']) == 1
    assert int(tmp.loc[3, 'c46_0_0']) == -7
    tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    # FIXME: this is strange, data type in this particular case needs np.round
    assert np.round(tmp.loc[1, 'c47_0_0'], 5) == 45.55412
    assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
    assert tmp.loc[2, 'c47_0_0'] == -0.55461
    assert tmp.loc[2, 'c48_0_0'] == '2016-11-30'
    # empty float cell must become NULL
    assert pd.isnull(tmp.loc[3, 'c47_0_0'])
    assert tmp.loc[3, 'c48_0_0'] == '2010-01-01'
def test_postgresql_float_is_empty(self):
    """Empty float CSV cells load as NULL in PostgreSQL.

    Loads example03.csv split into three tables and checks values directly
    in each table, including that a missing float (eid 3, c47_0_0) is NULL.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example03.csv')
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check data is correct
    tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
    assert tmp.loc[1, 'c21_1_0'] == 'No response'
    assert tmp.loc[1, 'c21_2_0'] == 'Yes'
    assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
    assert pd.isnull(tmp.loc[2, 'c21_1_0'])
    assert tmp.loc[2, 'c21_2_0'] == 'No'
    assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
    assert tmp.loc[3, 'c21_1_0'] == 'Of course'
    assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(tmp.loc[4, 'c21_2_0'])
    tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    # PostgreSQL returns real date objects, hence strftime comparisons
    assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
    assert int(tmp.loc[1, 'c34_0_0']) == 21
    assert int(tmp.loc[1, 'c46_0_0']) == -9
    assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
    assert int(tmp.loc[2, 'c34_0_0']) == 12
    assert int(tmp.loc[2, 'c46_0_0']) == -2
    assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
    assert int(tmp.loc[3, 'c34_0_0']) == 1
    assert int(tmp.loc[3, 'c46_0_0']) == -7
    tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
    assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
    assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
    # empty float cell must become NULL
    assert pd.isnull(tmp.loc[3, 'c47_0_0'])
    assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_timestamp_is_empty(self):
    """Empty date/timestamp CSV cells load as NULL in PostgreSQL.

    Loads example04.csv and checks that missing date values (eid 4 c31_0_0,
    eid 2 c48_0_0) come back as NULL while present values are intact.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example04.csv')
    # use the shared engine constant for consistency with the other tests
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check data is correct
    tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
    assert tmp.loc[1, 'c21_1_0'] == 'No response'
    assert tmp.loc[1, 'c21_2_0'] == 'Yes'
    assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
    assert pd.isnull(tmp.loc[2, 'c21_1_0'])
    assert tmp.loc[2, 'c21_2_0'] == 'No'
    assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
    assert tmp.loc[3, 'c21_1_0'] == 'Of course'
    assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
    assert pd.isnull(tmp.loc[4, 'c21_2_0'])
    tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
    assert int(tmp.loc[1, 'c34_0_0']) == 21
    assert int(tmp.loc[1, 'c46_0_0']) == -9
    assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
    assert int(tmp.loc[2, 'c34_0_0']) == 12
    assert int(tmp.loc[2, 'c46_0_0']) == -2
    assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
    assert int(tmp.loc[3, 'c34_0_0']) == 1
    assert int(tmp.loc[3, 'c46_0_0']) == -7
    # empty date cell must become NULL
    assert pd.isnull(tmp.loc[4, 'c31_0_0'])
    tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
    assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
    assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
    # empty timestamp cell must become NULL
    assert pd.isnull(tmp.loc[2, 'c48_0_0'])
    assert tmp.loc[3, 'c47_0_0'].round(5) == -5.32471
    assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_integer_is_nan(self):
    """NaN in an integer CSV column loads as NULL in PostgreSQL.

    Loads example06 (NaN in c46_0_0 for eid 2) and checks the value is NULL
    while all other integer/date values are preserved.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example06_nan_integer.csv')
    # use the shared engine constant for consistency with the other tests
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check data is correct
    tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
    assert int(tmp.loc[1, 'c34_0_0']) == 21
    assert int(tmp.loc[1, 'c46_0_0']) == -9
    assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
    assert int(tmp.loc[2, 'c34_0_0']) == 12
    # fixed: this line was missing its 'assert' and silently did nothing
    assert pd.isnull(tmp.loc[2, 'c46_0_0'])
    assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
    assert int(tmp.loc[3, 'c34_0_0']) == 1
    assert int(tmp.loc[3, 'c46_0_0']) == -7
    assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_first_row_is_nan_integer(self):
    """Integer column whose FIRST row is NaN still loads correctly.

    Exercises type inference when the very first value of c46_0_0 is NaN:
    the column must remain integer-typed and the NaNs become NULL.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example07_first_nan_integer.csv')
    # use the shared engine constant for consistency with the other tests
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check data is correct
    tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
    assert not tmp.empty
    assert tmp.shape[0] == 4
    assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
    assert int(tmp.loc[1, 'c34_0_0']) == 21
    assert pd.isnull(tmp.loc[1, 'c46_0_0'])
    assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
    assert int(tmp.loc[2, 'c34_0_0']) == 12
    # fixed: this line was missing its 'assert' and silently did nothing
    assert pd.isnull(tmp.loc[2, 'c46_0_0'])
    assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
    assert int(tmp.loc[3, 'c34_0_0']) == 1
    assert int(tmp.loc[3, 'c46_0_0']) == -7
    assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_sql_chunksize01(self):
    """query() with sql_chunksize=2 yields two chunks of two rows each.

    Verifies that the result is an iterable of DataFrames, that chunk
    boundaries fall at eids (1,2) then (3,4), and that all values are intact.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=2)
    p2sql.load_data()
    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
    query_result = p2sql.query(columns)
    # Validate
    assert query_result is not None
    # collections.Iterable was removed in Python 3.10; use collections.abc
    from collections.abc import Iterable
    assert isinstance(query_result, Iterable)
    index_len_sum = 0
    for chunk_idx, chunk in enumerate(query_result):
        assert chunk.index.name == 'eid'
        index_len_sum += len(chunk.index)
        assert len(chunk.index) == 2
        if chunk_idx == 0:
            indexes = (1, 2)
            assert all(x in chunk.index for x in indexes)
        else:
            indexes = (3, 4)
            assert all(x in chunk.index for x in indexes)
        assert len(chunk.columns) == len(columns)
        assert all(x in columns for x in chunk.columns)
        assert not chunk.empty
        assert chunk.shape[0] == 2
        if chunk_idx == 0:
            assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
            assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
            assert chunk.loc[1, 'c21_2_0'] == 'Yes'
            assert chunk.loc[2, 'c21_2_0'] == 'No'
            assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
            assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
        else:
            assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
            assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
            assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
            assert pd.isnull(chunk.loc[4, 'c21_2_0'])
            assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
            assert chunk.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
    # both chunks together must cover all 4 rows
    assert index_len_sum == 4
def test_postgresql_sql_chunksize02(self):
    """query() with sql_chunksize=3 yields a 3-row chunk then a 1-row chunk.

    Same data as chunksize01 but with a chunk size that does not divide the
    row count evenly, so the last chunk is a remainder of one row.
    """
    # Prepare
    csv_file = get_repository_path('pheno2sql/example02.csv')
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=3)
    p2sql.load_data()
    # Run
    columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
    query_result = p2sql.query(columns)
    # Validate
    assert query_result is not None
    # collections.Iterable was removed in Python 3.10; use collections.abc
    from collections.abc import Iterable
    assert isinstance(query_result, Iterable)
    index_len_sum = 0
    for chunk_idx, chunk in enumerate(query_result):
        assert chunk.index.name == 'eid'
        index_len_sum += len(chunk.index)
        if chunk_idx == 0:
            assert len(chunk.index) == 3
            indexes = (1, 2, 3)
            assert all(x in chunk.index for x in indexes)
        else:
            assert len(chunk.index) == 1
            indexes = (4,)
            assert all(x in chunk.index for x in indexes)
        assert len(chunk.columns) == len(columns)
        assert all(x in columns for x in chunk.columns)
        assert not chunk.empty
        if chunk_idx == 0:
            assert chunk.shape[0] == 3
            assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
            assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
            assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
            assert chunk.loc[1, 'c21_2_0'] == 'Yes'
            assert chunk.loc[2, 'c21_2_0'] == 'No'
            assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
            assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
            assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
            assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
        else:
            assert chunk.shape[0] == 1
            assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
            assert pd.isnull(chunk.loc[4, 'c21_2_0'])
            assert chunk.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
    # both chunks together must cover all 4 rows
    assert index_len_sum == 4
def test_postgresql_all_eids_table_created(self):
    """Loading two CSVs plus a bgen sample file creates the 'all_eids' table.

    The table must contain the union of eids from both sources:
    6 from the CSVs plus 4 extra from the sample file (10 total).
    """
    # Prepare
    directory = get_repository_path('pheno2sql/example14')
    # NOTE(review): directory is already an absolute repository path, so the
    # outer get_repository_path looks redundant here — presumably idempotent
    # for absolute paths; confirm against its implementation.
    csv_file1 = get_repository_path(os.path.join(directory, 'example14_00.csv'))
    csv_file2 = get_repository_path(os.path.join(directory, 'example14_01.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL((csv_file1, csv_file2), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check samples table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('all_eids'), create_engine(db_engine))
    assert table.iloc[0, 0]
    ## Check columns are correct
    all_eids = pd.read_sql('select * from all_eids', create_engine(db_engine))
    expected_columns = ["eid"]
    assert len(all_eids.columns) == len(expected_columns)
    assert all(x in all_eids.columns for x in expected_columns)
    ## Check data is correct
    all_eids = pd.read_sql('select * from all_eids', create_engine(db_engine), index_col='eid')
    # 6 eids from the CSV files + 4 only present in the bgen sample file
    assert len(all_eids.index) == 6 + 4, len(all_eids.index)
    assert 1000010 in all_eids.index
    assert 1000020 in all_eids.index
    assert 1000021 in all_eids.index
    assert 1000030 in all_eids.index
    assert 1000040 in all_eids.index
    assert 1000041 in all_eids.index
    assert 1000050 in all_eids.index
    assert 1000060 in all_eids.index
    assert 1000061 in all_eids.index
    assert 1000070 in all_eids.index
def test_postgresql_all_eids_table_constraints(self):
    """The 'all_eids' table gets a primary key on its single 'eid' column."""
    # Prepare
    directory = get_repository_path('pheno2sql/example14')
    csv_file1 = get_repository_path(os.path.join(directory, 'example14_00.csv'))
    csv_file2 = get_repository_path(os.path.join(directory, 'example14_01.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL((csv_file1, csv_file2), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check samples table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('all_eids'), create_engine(db_engine))
    assert table.iloc[0, 0]
    # primary key
    # _get_table_contrains (sic) is a helper defined elsewhere in this class;
    # 'pk_%%' matches primary-key constraint names (%% escapes % for the driver)
    constraint_sql = self._get_table_contrains('all_eids', relationship_query='pk_%%')
    constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
    assert constraints_results is not None
    assert not constraints_results.empty
    columns = constraints_results['column_name'].tolist()
    assert len(columns) == 1
    assert 'eid' in columns
def test_postgresql_bgen_samples_table_created(self):
    """Providing a bgen sample file creates a 'bgen_samples' table.

    The table maps each sample's positional index (1-based) to its eid,
    preserving the order of the .sample file.
    """
    # Prepare
    directory = get_repository_path('pheno2sql/example10')
    csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check samples table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('bgen_samples'), create_engine(db_engine))
    assert table.iloc[0, 0]
    ## Check columns are correct
    samples_data = pd.read_sql('select * from bgen_samples', create_engine(db_engine))
    expected_columns = ["index", "eid"]
    assert len(samples_data.columns) == len(expected_columns)
    assert all(x in samples_data.columns for x in expected_columns)
    ## Check data is correct
    samples_data = pd.read_sql('select * from bgen_samples', create_engine(db_engine), index_col='index')
    assert not samples_data.empty
    assert samples_data.shape[0] == 5
    # eids in .sample-file order, keyed by 1-based index
    assert samples_data.loc[1, 'eid'] == 1000050
    assert samples_data.loc[2, 'eid'] == 1000030
    assert samples_data.loc[3, 'eid'] == 1000040
    assert samples_data.loc[4, 'eid'] == 1000010
    assert samples_data.loc[5, 'eid'] == 1000020
def test_postgresql_bgen_samples_table_constraints(self):
    """'bgen_samples' gets a composite primary key and indexes on (eid, index)."""
    # Prepare
    directory = get_repository_path('pheno2sql/example10')
    csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check samples table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('bgen_samples'), create_engine(db_engine))
    assert table.iloc[0, 0]
    # primary key
    # _get_table_contrains (sic) is a helper defined elsewhere in this class
    constraint_sql = self._get_table_contrains('bgen_samples', relationship_query='pk_%%')
    constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
    assert constraints_results is not None
    assert not constraints_results.empty
    columns = constraints_results['column_name'].tolist()
    assert len(columns) == 2
    assert 'eid' in columns
    assert 'index' in columns
    # indexes
    constraint_sql = self._get_table_contrains('bgen_samples', relationship_query='ix_%%')
    constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
    assert constraints_results is not None
    assert not constraints_results.empty
    columns = constraints_results['column_name'].tolist()
    assert len(columns) == 2
    assert 'eid' in columns
    assert 'index' in columns
def test_postgresql_events_tables_only_one_instance_filled(self):
    """Categorical (event) data with a single instance populates 'events'.

    example10 has events for field 84 in one instance only; the table must
    hold one long-format row per (eid, field_id, instance, event).
    """
    # Prepare
    directory = get_repository_path('pheno2sql/example10')
    csv_file = get_repository_path(os.path.join(directory, 'example10_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check samples table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
    assert table.iloc[0, 0]
    ## Check columns are correct
    # deterministic ordering so positional .loc checks below are stable
    events_data = pd.read_sql('select * from events order by eid, instance, event', create_engine(db_engine))
    expected_columns = ['eid', 'field_id', 'instance', 'event']
    assert len(events_data.columns) == len(expected_columns)
    assert all(x in events_data.columns for x in expected_columns)
    ## Check data is correct
    assert not events_data.empty
    assert events_data.shape[0] == 6
    assert events_data.loc[0, 'eid'] == 1000020
    assert events_data.loc[0, 'field_id'] == 84
    assert events_data.loc[0, 'event'] == 'E103'
    assert events_data.loc[1, 'eid'] == 1000020
    assert events_data.loc[1, 'field_id'] == 84
    assert events_data.loc[1, 'event'] == 'N308'
    assert events_data.loc[2, 'eid'] == 1000020
    assert events_data.loc[2, 'field_id'] == 84
    assert events_data.loc[2, 'event'] == 'Q750'
    assert events_data.loc[3, 'eid'] == 1000030
    assert events_data.loc[3, 'field_id'] == 84
    assert events_data.loc[3, 'event'] == 'N308'
    assert events_data.loc[4, 'eid'] == 1000040
    assert events_data.loc[4, 'field_id'] == 84
    assert events_data.loc[4, 'event'] == 'N308'
    assert events_data.loc[5, 'eid'] == 1000050
    assert events_data.loc[5, 'field_id'] == 84
    assert events_data.loc[5, 'event'] == 'E103'
def test_postgresql_events_tables_only_two_instances_filled(self):
    """Categorical (event) data spread over two instances populates 'events'.

    example11 has field-84 events in instances 0 and 1; the events table must
    contain exactly one long-format row per (eid, field_id, instance, event).
    """
    # Prepare
    directory = get_repository_path('pheno2sql/example11')
    csv_file = get_repository_path(os.path.join(directory, 'example11_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check samples table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
    assert table.iloc[0, 0]
    ## Check columns are correct
    events_data = pd.read_sql('select * from events order by eid, instance, event', create_engine(db_engine))
    expected_columns = ['eid', 'field_id', 'instance', 'event']
    assert len(events_data.columns) == len(expected_columns)
    assert all(x in events_data.columns for x in expected_columns)
    ## Check data is correct
    assert not events_data.empty
    assert events_data.shape[0] == 11
    # expected rows in (eid, instance, event) order, matching the SQL ORDER BY
    expected_rows = [
        (1000010, 84, 1, 'E103'),
        (1000010, 84, 1, 'Q750'),
        (1000020, 84, 0, 'E103'),
        (1000020, 84, 0, 'N308'),
        (1000020, 84, 1, 'J32'),
        (1000030, 84, 0, 'N308'),
        (1000030, 84, 1, 'Q750'),
        (1000040, 84, 0, 'N308'),
        (1000040, 84, 1, 'E103'),
        (1000040, 84, 1, 'Q750'),
        (1000050, 84, 0, 'E103'),
    ]
    for row_idx, (exp_eid, exp_field, exp_instance, exp_event) in enumerate(expected_rows):
        assert events_data.loc[row_idx, 'eid'] == exp_eid
        assert events_data.loc[row_idx, 'field_id'] == exp_field
        assert events_data.loc[row_idx, 'instance'] == exp_instance
        assert events_data.loc[row_idx, 'event'] == exp_event
def test_postgresql_events_tables_two_categorical_fields_and_two_and_three_instances(self):
    """Verify the 'events' table contents when two categorical data-fields
    (84 and 85) with two and three instances are loaded from example12.
    """
    # Prepare
    directory = get_repository_path('pheno2sql/example12')
    csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    ## Check events table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
    assert table.iloc[0, 0]

    ## Check columns are correct
    events_data = pd.read_sql('select * from events order by eid, field_id, instance, event', create_engine(db_engine))
    expected_columns = ['eid', 'field_id', 'instance', 'event']
    assert len(events_data.columns) == len(expected_columns)
    assert all(x in events_data.columns for x in expected_columns)

    ## Check total data
    # One tuple per expected row, in the query's sort order
    # (eid, field_id, instance, event).
    expected_rows = [
        # 1000010
        (1000010, 84, 1, 'E103'),
        (1000010, 84, 1, 'Q750'),
        (1000010, 85, 0, '1136'),
        (1000010, 85, 0, '1434'),
        (1000010, 85, 2, '1701'),
        # 1000020
        (1000020, 84, 0, 'E103'),
        (1000020, 84, 0, 'N308'),
        (1000020, 84, 1, 'J32'),
        (1000020, 85, 0, '1114'),
        (1000020, 85, 0, '1434'),
        (1000020, 85, 1, '1136'),
        # 1000030
        (1000030, 84, 0, 'N308'),
        (1000030, 84, 1, 'Q750'),
        (1000030, 85, 1, '1434'),
        # 1000040
        (1000040, 84, 0, 'N308'),
        (1000040, 84, 1, 'E103'),
        (1000040, 84, 1, 'Q750'),
        (1000040, 85, 1, '1114'),
        (1000040, 85, 1, '1136'),
        (1000040, 85, 2, '457'),
        # 1000050
        (1000050, 84, 0, 'E103'),
        (1000050, 85, 0, '1434'),
        (1000050, 85, 1, '1114'),
        # 1000060
        (1000060, 85, 2, '1114'),
        (1000060, 85, 2, '1136'),
    ]
    assert not events_data.empty
    assert events_data.shape[0] == len(expected_rows)

    # Data-driven check instead of 100 copy-pasted asserts; the trailing
    # message reports the failing row index.
    for cidx, (eid, field_id, instance, event) in enumerate(expected_rows):
        assert events_data.loc[cidx, 'eid'] == eid, cidx
        assert events_data.loc[cidx, 'field_id'] == field_id, cidx
        assert events_data.loc[cidx, 'instance'] == instance, cidx
        assert events_data.loc[cidx, 'event'] == event, cidx
def test_postgresql_events_tables_check_constrains_exist(self):
    """The 'events' table must have its composite primary key and all expected indexes."""
    # Prepare
    directory = get_repository_path('pheno2sql/example12')
    csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    # Validate
    assert p2sql.db_type == 'postgresql'
    ## Check events table exists
    table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('events'), create_engine(db_engine))
    assert table.iloc[0, 0]
    # primary key: must span the four columns (eid, field_id, instance, event)
    constraint_sql = self._get_table_contrains('events', relationship_query='pk_%%')
    constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
    assert constraints_results is not None
    assert not constraints_results.empty
    columns = constraints_results['column_name'].tolist()
    assert len(columns) == 4
    assert 'eid' in columns
    assert 'field_id' in columns
    assert 'instance' in columns
    assert 'event' in columns
    # indexes: one per single column plus the composite (field_id, event) one
    constraint_sql = self._get_table_contrains('events', relationship_query='ix_%%')
    constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine), index_col='index_name')
    assert constraints_results is not None
    assert not constraints_results.empty
    # 4 single-column index rows + 2 rows for the composite index
    assert constraints_results.shape[0] == 6
    assert constraints_results.loc[['ix_events_eid']].shape[0] == 1
    assert constraints_results.loc['ix_events_eid', 'column_name'] == 'eid'
    assert constraints_results.loc[['ix_events_field_id']].shape[0] == 1
    assert constraints_results.loc['ix_events_field_id', 'column_name'] == 'field_id'
    assert constraints_results.loc[['ix_events_instance']].shape[0] == 1
    assert constraints_results.loc['ix_events_instance', 'column_name'] == 'instance'
    assert constraints_results.loc[['ix_events_event']].shape[0] == 1
    assert constraints_results.loc['ix_events_event', 'column_name'] == 'event'
    assert constraints_results.loc[['ix_events_field_id_event']].shape[0] == 2
    assert 'field_id' in constraints_results.loc['ix_events_field_id_event', 'column_name'].tolist()
    assert 'event' in constraints_results.loc['ix_events_field_id_event', 'column_name'].tolist()
def test_postgresql_phenotypes_tables_check_constrains_exist(self):
    """Each generated phenotype table must exist and have a primary key on 'eid'."""
    # Prepare
    directory = get_repository_path('pheno2sql/example12')
    csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=15, loading_n_jobs=1)

    # Run
    p2sql.load_data()

    # Validate
    assert p2sql.db_type == 'postgresql'

    # Same existence + primary-key check for each table (was duplicated
    # verbatim per table); the failure message names the offending table.
    for table_name in ('ukb_pheno_0_00', 'ukb_pheno_0_01'):
        ## Check table exists
        table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format(table_name), create_engine(db_engine))
        assert table.iloc[0, 0], table_name

        # primary key on 'eid'
        constraint_sql = self._get_table_contrains(table_name, column_query='eid', relationship_query='pk_%%')
        constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
        assert constraints_results is not None, table_name
        assert not constraints_results.empty, table_name
def test_postgresql_vacuum(self):
    """load_data(vacuum=True) must eventually leave VACUUM/ANALYZE stats on the tables."""
    import time  # local import: only needed by the polling loop below

    # Prepare
    directory = get_repository_path('pheno2sql/example12')
    csv_file = get_repository_path(os.path.join(directory, 'example12_diseases.csv'))
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL(csv_file, db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)

    # Run
    p2sql.load_data(vacuum=True)

    # Validate
    vacuum_data = pd.DataFrame()
    query_count = 0
    # VACUUM/ANALYZE finish asynchronously: poll pg_stat_user_tables until
    # the stats appear. Bounded retries keep the test from hanging; the short
    # sleep replaces the previous busy-wait that hammered the server (FIXME).
    while vacuum_data.empty and query_count < 150:
        vacuum_data = pd.read_sql("""
            select relname, last_vacuum, last_analyze
            from pg_stat_user_tables
            where schemaname = 'public' and last_vacuum is not null and last_analyze is not null
        """, db_engine)
        query_count += 1
        if vacuum_data.empty:
            time.sleep(0.1)

    assert vacuum_data is not None
    assert not vacuum_data.empty
def test_postgresql_load_data_non_utf_characters(self):
    """Loading CSV files with mixed encodings (latin1 and utf-8) must preserve the text."""
    # Prepare
    directory = get_repository_path('pheno2sql/example15')
    csv_file1 = get_repository_path(os.path.join(directory, 'example15_00.csv'))  # latin1
    csv_file2 = get_repository_path(os.path.join(directory, 'example15_01.csv'))  # latin1
    csv_file3 = get_repository_path(os.path.join(directory, 'example15_02.csv'))  # utf-8
    db_engine = POSTGRESQL_ENGINE
    p2sql = Pheno2SQL((csv_file1, csv_file2, csv_file3), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    columns = ['c21_1_0', 'c21_0_0', 'c103_0_0', 'c104_0_0', 'c221_0_0', 'c221_1_0']
    query_result = next(p2sql.query(columns))
    # Validate
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 10
    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)
    assert not query_result.empty
    # Values containing non-ASCII characters must round-trip unchanged
    assert query_result.loc[1000041, 'c103_0_0'] == 'Optión 4'
    assert query_result.loc[1000041, 'c104_0_0'] == '158'
    assert query_result.loc[1000070, 'c21_1_0'] == 'Of course ñ'
    assert query_result.loc[1000070, 'c21_0_0'] == 'Option number 7'
    assert query_result.loc[1000050, 'c221_0_0'] == 'Option number 25'
    assert query_result.loc[1000050, 'c221_1_0'] == 'Maybe ñó'
def test_postgresql_load_data_with_duplicated_data_field(self):
    """When two datasets provide the same data-field, the newer dataset must win."""
    # Prepare
    directory = get_repository_path('pheno2sql/example16')
    csv_file1 = get_repository_path(os.path.join(directory, 'example1600.csv'))
    csv_file2 = get_repository_path(os.path.join(directory, 'example1601.csv'))
    db_engine = POSTGRESQL_ENGINE
    # intentionally, load first "latest" dataset (since 1601 > 1600)
    p2sql = Pheno2SQL((csv_file2, csv_file1), db_engine, bgen_sample_file=os.path.join(directory, 'impv2.sample'),
                      n_columns_per_table=2, loading_n_jobs=1)
    # Run
    p2sql.load_data()
    columns = ['c103_0_0', 'c47_0_0', 'c50_0_0']
    query_result = next(p2sql.query(columns))
    # Validate
    assert query_result.index.name == 'eid'
    assert len(query_result.index) == 7 + 3, len(query_result.index)
    assert not query_result.empty
    assert query_result.shape[0] == 7 + 3, query_result.shape[0]
    assert len(query_result.columns) == len(columns)
    assert all(x in columns for x in query_result.columns)
    # these individuals should not have data for data-field 50, since we overwrote the old dataset (1600)
    assert pd.isnull(query_result.loc[1000021, 'c50_0_0'])
    assert pd.isnull(query_result.loc[1000041, 'c50_0_0'])
    assert pd.isnull(query_result.loc[1000061, 'c50_0_0'])
    # should keep "newest" data (in 1601, csv_file2)
    assert query_result.loc[1000010, 'c50_0_0'] == 1.01
    assert query_result.loc[1000020, 'c50_0_0'] == 1.05
    assert query_result.loc[1000030, 'c50_0_0'] == 1.21
    assert query_result.loc[1000040, 'c50_0_0'] == 1.25
    assert query_result.loc[1000050, 'c50_0_0'] == 1.41
    assert query_result.loc[1000060, 'c50_0_0'] == 1.45
    assert query_result.loc[1000070, 'c50_0_0'] == 1.50
    # check other data-fields
    assert pd.isnull(query_result.loc[1000020, 'c103_0_0'])
    assert pd.isnull(query_result.loc[1000040, 'c103_0_0'])
    assert pd.isnull(query_result.loc[1000060, 'c103_0_0'])
    assert pd.isnull(query_result.loc[1000070, 'c103_0_0'])
    assert query_result.loc[1000010, 'c103_0_0'] == 'Option 1'
    assert query_result.loc[1000021, 'c103_0_0'] == 'Option 2'
    assert query_result.loc[1000030, 'c103_0_0'] == 'Option 3'
    assert query_result.loc[1000041, 'c103_0_0'] == 'Option 4'
    assert query_result.loc[1000050, 'c103_0_0'] == 'Option 5'
    assert query_result.loc[1000061, 'c103_0_0'] == 'Option 6'
    assert pd.isnull(query_result.loc[1000021, 'c47_0_0'])
    assert pd.isnull(query_result.loc[1000041, 'c47_0_0'])
    assert pd.isnull(query_result.loc[1000061, 'c47_0_0'])
    assert query_result.loc[1000010, 'c47_0_0'] == 41.55312
    assert query_result.loc[1000020, 'c47_0_0'] == -10.51461
    assert query_result.loc[1000030, 'c47_0_0'] == -35.31471
    assert query_result.loc[1000040, 'c47_0_0'] == 5.20832
    assert pd.isnull(query_result.loc[1000050, 'c47_0_0'])
    assert query_result.loc[1000060, 'c47_0_0'] == 0.55478
assert pd.isnull(query_result.loc[1000070, 'c47_0_0']) | miltondp/ukbrest | tests/test_pheno2sql.py | Python | gpl-3.0 | 95,438 |
# (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: jsonfile
short_description: JSON formatted files.
description:
- This cache uses JSON formatted, per host, files saved to the filesystem.
version_added: "1.9"
author: Ansible Core (@ansible-core)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the JSON files
type: list
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the JSON files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import codecs
try:
import simplejson as json
except ImportError:
import json
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
    """
    A caching module backed by json files.

    Persistence (paths, prefixes, expiry) is handled by BaseFileCacheModule;
    this class only defines how a single cache entry is (de)serialized.
    """

    def _load(self, filepath):
        """Read one cache entry from *filepath* and return the decoded object."""
        # Valid JSON is always UTF-8 encoded.
        with codecs.open(filepath, 'r', encoding='utf-8') as f:
            return json.load(f, cls=AnsibleJSONDecoder)

    def _dump(self, value, filepath):
        """Write *value* to *filepath* as deterministic, human-readable JSON."""
        with codecs.open(filepath, 'w', encoding='utf-8') as f:
            # sort_keys gives stable output; indent keeps the file diffable.
            f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
| mheap/ansible | lib/ansible/plugins/cache/jsonfile.py | Python | gpl-3.0 | 2,046 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2018-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Loader for qutebrowser extensions."""
import importlib.abc
import pkgutil
import types
import typing
import sys
import pathlib
import attr
from PyQt5.QtCore import pyqtSlot
from qutebrowser import components
from qutebrowser.config import config
from qutebrowser.utils import log, standarddir
from qutebrowser.misc import objects
if typing.TYPE_CHECKING:
import argparse
# ModuleInfo objects for all loaded plugins
_module_infos = []
@attr.s
class InitContext:

    """Context an extension gets in its init hook."""

    data_dir = attr.ib()  # type: pathlib.Path
    config_dir = attr.ib()  # type: pathlib.Path
    args = attr.ib()  # type: argparse.Namespace
@attr.s
class ModuleInfo:

    """Information attached to an extension module.

    This gets used by qutebrowser.api.hook.
    """

    # (option name or None, hook callable) pairs; a None option means the
    # hook runs for any config change (see _on_config_changed below).
    _ConfigChangedHooksType = typing.List[typing.Tuple[typing.Optional[str],
                                                       typing.Callable]]

    skip_hooks = attr.ib(False)  # type: bool
    init_hook = attr.ib(None)  # type: typing.Optional[typing.Callable]
    config_changed_hooks = attr.ib(
        attr.Factory(list))  # type: _ConfigChangedHooksType
@attr.s
class ExtensionInfo:

    """Information about a qutebrowser extension."""

    # Fully qualified module name, e.g. "qutebrowser.components.foo".
    name = attr.ib()  # type: str
def add_module_info(module: types.ModuleType) -> ModuleInfo:
    """Return the ModuleInfo attached to *module*, creating it on first use."""
    # pylint: disable=protected-access
    info = getattr(module, '__qute_module_info', None)
    if info is None:
        info = ModuleInfo()
        module.__qute_module_info = info  # type: ignore
    return info
def load_components(*, skip_hooks: bool = False) -> None:
    """Load every extension module found in qutebrowser.components."""
    for extension_info in walk_components():
        _load_component(extension_info, skip_hooks=skip_hooks)
def walk_components() -> typing.Iterator[ExtensionInfo]:
    """Yield ExtensionInfo objects for all modules."""
    # PyInstaller bundles need a different discovery mechanism.
    walker = _walk_pyinstaller if hasattr(sys, 'frozen') else _walk_normal
    yield from walker()
def _on_walk_error(name: str) -> None:
raise ImportError("Failed to import {}".format(name))
def _walk_normal() -> typing.Iterator[ExtensionInfo]:
    """Walk extensions when not using PyInstaller."""
    for _finder, name, ispkg in pkgutil.walk_packages(
            # Only packages have a __path__ attribute,
            # but we're sure this is one.
            path=components.__path__,  # type: ignore
            prefix=components.__name__ + '.',
            onerror=_on_walk_error):
        if ispkg:
            # Sub-packages are containers, not components themselves.
            continue
        yield ExtensionInfo(name=name)
def _walk_pyinstaller() -> typing.Iterator[ExtensionInfo]:
    """Walk extensions when using PyInstaller.

    See https://github.com/pyinstaller/pyinstaller/issues/1905

    Inspired by:
    https://github.com/webcomics/dosage/blob/master/dosagelib/loader.py
    """
    toc = set()  # type: typing.Set[str]
    # Union the tables of contents of every PyInstaller importer.
    for importer in pkgutil.iter_importers('qutebrowser'):
        if hasattr(importer, 'toc'):
            toc |= importer.toc
    for name in toc:
        # Keep only modules under qutebrowser.components.
        if name.startswith(components.__name__ + '.'):
            yield ExtensionInfo(name=name)
def _get_init_context() -> InitContext:
    """Get an InitContext object with the current data/config dirs and args."""
    return InitContext(data_dir=pathlib.Path(standarddir.data()),
                       config_dir=pathlib.Path(standarddir.config()),
                       args=objects.args)
def _load_component(info: ExtensionInfo, *,
                    skip_hooks: bool = False) -> types.ModuleType:
    """Load the given extension and run its init hook (if any).

    Args:
        skip_hooks: Whether to skip all hooks for this module.
                    This is used to only run @cmdutils.register decorators.

    Return:
        The imported module.
    """
    log.extensions.debug("Importing {}".format(info.name))
    mod = importlib.import_module(info.name)

    mod_info = add_module_info(mod)
    if skip_hooks:
        # Remember the choice so _on_config_changed skips this module too.
        mod_info.skip_hooks = True

    if mod_info.init_hook is not None and not skip_hooks:
        log.extensions.debug("Running init hook {!r}"
                             .format(mod_info.init_hook.__name__))
        mod_info.init_hook(_get_init_context())

    # Track the module so config-changed hooks can be dispatched later.
    _module_infos.append(mod_info)
    return mod
@pyqtSlot(str)
def _on_config_changed(changed_name: str) -> None:
    """Call config_changed hooks if the config changed."""
    for mod_info in _module_infos:
        if mod_info.skip_hooks:
            continue
        for option, hook in mod_info.config_changed_hooks:
            if option is None:
                # Hook registered without an option filter: run on any change.
                hook()
            else:
                cfilter = config.change_filter(option)
                cfilter.validate()
                if cfilter.check_match(changed_name):
                    hook()
def init() -> None:
    """Connect the config-changed hook dispatcher to the config instance."""
    config.instance.changed.connect(_on_config_changed)
| t-wissmann/qutebrowser | qutebrowser/extensions/loader.py | Python | gpl-3.0 | 5,652 |
# Copyright (C) British Crown (Met Office) & Contributors.
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Implement "rose task-run"."""
import os
import sys
import traceback
from metomi.rose.app_run import AppRunner
from metomi.rose.env import env_export
from metomi.rose.opt_parse import RoseOptionParser
from metomi.rose.popen import RosePopenError
from metomi.rose.reporter import Reporter
from metomi.rose.run import Runner
from metomi.rose.task_env import get_prepend_paths
class TaskAppNotFoundError(Exception):

    """Error: a task has no associated application configuration.

    Raised with two arguments: the task name and the application key that
    could not be resolved to an "app/KEY" directory.
    """

    def __str__(self):
        task_name, app_key = self.args
        return "{0} (key={1}): task has no associated application.".format(
            task_name, app_key)
class TaskRunner(Runner):

    """A wrapper to a Rose task."""

    NAME = "task"
    # All "rose app-run" options plus the task-env related ones below.
    OPTIONS = AppRunner.OPTIONS + [
        "app_key",
        "cycle",
        "cycle_offsets",
        "path_globs",
        "prefix_delim",
        "suffix_delim",
    ]

    def __init__(self, *args, **kwargs):
        Runner.__init__(self, *args, **kwargs)
        # The actual application run is delegated to an AppRunner that shares
        # this runner's helpers (event handler, popen, config, fs, suite engine).
        self.app_runner = AppRunner(
            event_handler=self.event_handler,
            popen=self.popen,
            config_pm=self.config_pm,
            fs_util=self.fs_util,
            suite_engine_proc=self.suite_engine_proc,
        )

    def run_impl(self, opts, args, uuid, work_files):
        """Run application configuration as a suite task."""
        # "rose task-env": compute and export the task properties.
        t_prop = self.suite_engine_proc.get_task_props(
            cycle=opts.cycle,
            cycle_offsets=opts.cycle_offsets,
            prefix_delim=opts.prefix_delim,
            suffix_delim=opts.suffix_delim,
        )
        # Export only values that differ from the current environment;
        # remember whether anything changed for get_prepend_paths below.
        is_changed = False
        for key, value in t_prop:
            if os.getenv(key) != value:
                env_export(key, value, self.event_handler)
                is_changed = True
        path_globs = opts.path_globs
        if path_globs is None:
            path_globs = []
        prepend_paths_map = get_prepend_paths(
            self.event_handler,
            t_prop.suite_dir,
            path_globs,
            full_mode=is_changed,
        )
        # Prepend the computed paths to any existing value of each variable.
        for key, prepend_paths in prepend_paths_map.items():
            orig_paths = []
            orig_v = os.getenv(key, "")
            if orig_v:
                orig_paths = orig_v.split(os.pathsep)
            value = os.pathsep.join(prepend_paths + orig_paths)
            env_export(key, value, self.event_handler)

        # Name association with builtin applications
        builtin_app = None
        if opts.app_mode is None:
            builtin_apps_manager = self.app_runner.builtins_manager
            builtin_app = builtin_apps_manager.guess_handler(t_prop.task_name)
            if builtin_app is not None:
                opts.app_mode = builtin_app.SCHEME

        # Determine what app config to use: explicit --app-key/ROSE_TASK_APP
        # first, otherwise fall back to the task name (for-else below).
        if not opts.conf_dir:
            for app_key in [opts.app_key, os.getenv("ROSE_TASK_APP")]:
                if app_key is not None:
                    conf_dir = os.path.join(t_prop.suite_dir, "app", app_key)
                    if not os.path.isdir(conf_dir):
                        raise TaskAppNotFoundError(t_prop.task_name, app_key)
                    break
            else:
                app_key = t_prop.task_name
                conf_dir = os.path.join(
                    t_prop.suite_dir, "app", t_prop.task_name
                )
                if (
                    not os.path.isdir(conf_dir)
                    and builtin_app is not None
                    and builtin_app.get_app_key(t_prop.task_name)
                ):
                    # A builtin application may select a different app_key
                    # based on the task name.
                    app_key = builtin_app.get_app_key(t_prop.task_name)
                    conf_dir = os.path.join(t_prop.suite_dir, "app", app_key)
                if not os.path.isdir(conf_dir):
                    raise TaskAppNotFoundError(t_prop.task_name, app_key)
            opts.conf_dir = conf_dir

        return self.app_runner(opts, args)
def main():
    """Launcher for the CLI."""
    opt_parser = RoseOptionParser(
        usage='rose task-run [OPTIONS] [--] [APP-COMMAND ...]',
        description='''
Provide an environment to run a suite task.
Provides environment variables documented in `rose task-env`. It is worth
noting that if the environment variables are already provided by
`rose task-env`, this command will not override them.
Normally, the suite task will select a Rose application configuration
that has the same name as the task. This can be overridden by the
`--app-key=KEY` option or the `ROSE_TASK_APP` environment variable.
SHORT OPTIONS
All options of `rose app-run` and `rose task-env` are supported.
Additional options are:
--app-key=KEY
Specify a named application configuration.
''',
        epilog='''
ENVIRONMENT VARIABLES
All environment variables of `rose app-run` and `rose task-env` are
supported. All environment variables documented in `rose task-env` are
passed to the application `rose task-run` runs.
The following environment variables are used by `rose task-run`:
ROSE_TASK_APP
Specify a named application configuration.
SEE ALSO
* `rose app-run`
* `rose task-env`
''',
    )
    option_keys = TaskRunner.OPTIONS
    opt_parser.add_my_options(*option_keys)
    opts, args = opt_parser.parse_args()
    # Verbosity is the net of -v and -q flags.
    event_handler = Reporter(opts.verbosity - opts.quietness)
    runner = TaskRunner(event_handler)
    try:
        sys.exit(runner(opts, args))
    except Exception as exc:
        # Report the failure through the runner's event handler, then exit
        # with the subprocess return code when one is available.
        runner.handle_event(exc)
        if opts.debug_mode:
            traceback.print_exc()
        if isinstance(exc, RosePopenError):
            sys.exit(exc.ret_code)
        else:
            sys.exit(1)
| metomi/rose | metomi/rose/task_run.py | Python | gpl-3.0 | 6,607 |
# Generated by Django 2.0.5 on 2018-05-28 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add layout/metadata columns (geometry, fonts, colors, role, page) to DocPar."""

    dependencies = [
        ('scoping', '0196_auto_20180528_0902'),
    ]

    # (column name, field kind) in the exact order the fields are added.
    # 'char' -> CharField(max_length=50), 'float' -> FloatField,
    # 'int' -> IntegerField; all columns are nullable.
    _NEW_FIELDS = [
        ('endColor', 'char'),
        ('endFont', 'char'),
        ('endFontsize', 'float'),
        ('height', 'float'),
        ('imputed_role', 'char'),
        ('maxX', 'float'),
        ('maxY', 'float'),
        ('minX', 'float'),
        ('minY', 'float'),
        ('mostCommonColor', 'char'),
        ('mostCommonFont', 'char'),
        ('mostCommonFontsize', 'float'),
        ('page', 'int'),
        ('role', 'char'),
        ('startColor', 'char'),
        ('startFont', 'char'),
        ('startFontsize', 'float'),
        ('width', 'float'),
    ]

    operations = [
        migrations.AddField(
            model_name='docpar',
            name=field_name,
            field=(models.CharField(max_length=50, null=True) if kind == 'char'
                   else models.FloatField(null=True) if kind == 'float'
                   else models.IntegerField(null=True)),
        )
        for field_name, kind in _NEW_FIELDS
    ]
| mcallaghan/tmv | BasicBrowser/scoping/migrations/0197_auto_20180528_1537.py | Python | gpl-3.0 | 3,055 |
import copy
from decimal import Decimal
import datetime
from celery_formtask.tasks import processform, PROGRESS_STATE
class FormTaskMixin:
    """Mixin for forms that are processed as a Celery task.

    Records the constructor arguments so the form can be re-created inside a
    worker (``enqueue``), and exposes ``set_progress`` for reporting progress
    on the task that is currently processing the form.
    """

    def __init__(self, *args, task=None, ignored_kwargs=(), **kwargs):
        """Store *task* and back up ``args``/``kwargs`` before delegating.

        Args:
            task: The Celery task currently processing this form, or None.
            ignored_kwargs: Names of keyword arguments that are recorded for
                re-creation but must not be forwarded to the parent form.
                (Default is an immutable tuple: a mutable ``[]`` default is
                shared across calls.)
        """
        self._task = task
        self._args_bak = args
        # Copy *before* popping so enqueue() re-creates the form with the
        # full original kwargs (including the ignored ones).
        self._kwargs_bak = copy.copy(kwargs)
        for kwarg in ignored_kwargs:
            # Default of None makes a missing ignored kwarg a no-op
            # (previously this raised KeyError when the kwarg was absent).
            kwargs.pop(kwarg, None)
        super().__init__(*args, **kwargs)

    def set_progress(self, current=None, total=None, description=""):
        """Report progress on the attached task; no-op when no task is set.

        Args:
            current: Current step number (must be >= 0 to get a percentage).
            total: Total number of steps; falls back to the form's
                ``formtask_total_steps`` attribute when omitted.
            description: Status text; falls back to ``formtask_description``.
        """
        if self._task:
            meta_total = total or getattr(self, "formtask_total_steps", None)
            meta_description = description or getattr(
                self, "formtask_description", None
            )
            if (
                meta_total is not None
                and meta_total > 0
                and current is not None
                and current >= 0
            ):
                # Percentage rounded to two decimals, e.g. 42.86.
                percent = (Decimal(current) / Decimal(meta_total)) * Decimal(100)
                meta_percent = float(round(percent, 2))
            else:
                meta_percent = None
            self._task.update_state(
                state=PROGRESS_STATE,
                meta={
                    "current": current,
                    "total": meta_total,
                    "percent": meta_percent,
                    "description": meta_description,
                    "time": datetime.datetime.utcnow(),
                },
            )

    def enqueue(self, name=None):
        """Schedule this form for background processing; return the AsyncResult.

        Args:
            name: Optional shadow name under which the task is reported.
        """
        opts = {"shadow": name} if name else {}
        # The worker re-imports the form class by module/name and rebuilds it
        # from the backed-up constructor arguments.
        async_result = processform.apply_async(
            args=(self.__class__.__module__, self.__class__.__name__) + self._args_bak,
            kwargs=self._kwargs_bak,
            **opts
        )
        return async_result
| HumanExposure/factotum | celery_formtask/forms.py | Python | gpl-3.0 | 1,765 |
#coding:utf8
from baseclass.DeepRecommender import DeepRecommender
import numpy as np
from random import choice,random,randint,shuffle
from tool import config
import tensorflow as tf
#According to the paper, we only
class DMF(DeepRecommender):
def __init__(self,conf,trainingSet=None,testSet=None,fold='[1]'):
    # All configuration/data handling is delegated to DeepRecommender.
    super(DMF, self).__init__(conf,trainingSet,testSet,fold)
def next_batch(self, i):
    """Return the i-th training batch plus negative samples.

    Returns a tuple (rows, cols, ratings, u_idx, v_idx) where the first
    batch_size entries are observed (positive) interactions and the next
    negative_sp*batch_size entries are sampled unobserved (u, v) pairs with
    rating 0.
    """
    rows = np.zeros(((self.negative_sp+1)*self.batch_size, self.num_items))
    cols = np.zeros(((self.negative_sp+1)*self.batch_size, self.num_users))
    batch_idx = range(self.batch_size*i, self.batch_size*(i+1))
    users = [self.data.trainingData[idx][0] for idx in batch_idx]
    items = [self.data.trainingData[idx][1] for idx in batch_idx]
    u_idx = [self.data.user[u] for u in users]
    v_idx = [self.data.item[it] for it in items]
    ratings = [float(self.data.trainingData[idx][2]) for idx in batch_idx]
    for n, user in enumerate(users):
        rows[n] = self.data.row(user)
    for n, item in enumerate(items):
        cols[n] = self.data.col(item)
    # list() keeps choice() working on Python 3 dict views as well.
    userList = list(self.data.user.keys())
    itemList = list(self.data.item.keys())
    # negative sampling: draw (u, v) pairs with no observed interaction
    for n in range(self.negative_sp*self.batch_size):
        u = choice(userList)
        v = choice(itemList)
        while self.data.contains(u, v):
            u = choice(userList)
            v = choice(itemList)
        # BUG FIX: write at batch_size+n (was batch_size-1+n, which clobbered
        # the last positive sample and left the final row all-zero), and store
        # col(v) for the sampled item (was col(n), the loop index).
        rows[self.batch_size+n] = self.data.row(u)
        cols[self.batch_size+n] = self.data.col(v)
        u_idx.append(self.data.user[u])
        v_idx.append(self.data.item[v])
        ratings.append(0)
    return rows, cols, np.array(ratings), np.array(u_idx), np.array(v_idx)
def initModel(self):
super(DMF, self).initModel()
n_input_u = len(self.data.item)
n_input_i = len(self.data.user)
self.negative_sp = 5
self.n_hidden_u=[256,512]
self.n_hidden_i=[256,512]
self.input_u = tf.placeholder(tf.float, [None, n_input_u])
self.input_i = tf.placeholder(tf.float, [None, n_input_i])
def buildModel(self):
super(DMF, self).buildModel_tf()
initializer = tf.contrib.layers.xavier_initializer()
#user net
user_W1 = tf.Variable(initializer([self.num_items, self.n_hidden_u[0]],stddev=0.01))
self.user_out = tf.nn.relu(tf.matmul(self.input_u, user_W1))
self.regLoss = tf.nn.l2_loss(user_W1)
for i in range(1, len(self.n_hidden_u)):
W = tf.Variable(initializer([self.n_hidden_u[i-1], self.n_hidden_u[i]],stddev=0.01))
b = tf.Variable(initializer([self.n_hidden_u[i]],stddev=0.01))
self.regLoss = tf.add(self.regLoss,tf.nn.l2_loss(W))
self.regLoss = tf.add(self.regLoss, tf.nn.l2_loss(b))
self.user_out = tf.nn.relu(tf.add(tf.matmul(self.user_out, W), b))
#item net
item_W1 = tf.Variable(initializer([self.num_users, self.n_hidden_i[0]],stddev=0.01))
self.item_out = tf.nn.relu(tf.matmul(self.input_i, item_W1))
self.regLoss = tf.add(self.regLoss, tf.nn.l2_loss(item_W1))
for i in range(1, len(self.n_hidden_i)):
W = tf.Variable(initializer([self.n_hidden_i[i-1], self.n_hidden_i[i]],stddev=0.01))
b = tf.Variable(initializer([self.n_hidden_i[i]],stddev=0.01))
self.regLoss = tf.add(self.regLoss, tf.nn.l2_loss(W))
self.regLoss = tf.add(self.regLoss, tf.nn.l2_loss(b))
self.item_out = tf.nn.relu(tf.add(tf.matmul(self.item_out, W), b))
norm_user_output = tf.sqrt(tf.reduce_sum(tf.square(self.user_out), axis=1))
norm_item_output = tf.sqrt(tf.reduce_sum(tf.square(self.item_out), axis=1))
self.y_ = tf.reduce_sum(tf.multiply(self.user_out, self.item_out), axis=1) / (
norm_item_output * norm_user_output)
self.y_ = tf.maximum(1e-6, self.y_)
self.loss = self.r*tf.log(self.y_) + (1 - self.r) * tf.log(1 - self.y_)#tf.nn.sigmoid_cross_entropy_with_logits(logits=self.y_,labels=self.r)
#self.loss = tf.nn.l2_loss(tf.subtract(self.y_,self.r))
self.loss = -tf.reduce_sum(self.loss)
reg_lambda = tf.constant(self.regU, dtype=tf.float32)
self.regLoss = tf.multiply(reg_lambda,self.regLoss)
self.loss = tf.add(self.loss,self.regLoss)
optimizer = tf.train.AdamOptimizer(self.lRate).minimize(self.loss)
self.U = np.zeros((self.num_users, self.n_hidden_u[-1]))
self.V = np.zeros((self.num_items, self.n_hidden_u[-1]))
init = tf.global_variables_initializer()
self.sess.run(init)
total_batch = int(len(self.data.trainingData)/ self.batch_size)
for epoch in range(self.maxIter):
shuffle(self.data.trainingData)
for i in range(total_batch):
users,items,ratings,u_idx,v_idx = self.next_batch(i)
shuffle_idx=np.random.permutation(range(len(users)))
users = users[shuffle_idx]
items = items[shuffle_idx]
ratings = ratings[shuffle_idx]
u_idx = u_idx[shuffle_idx]
v_idx = v_idx[shuffle_idx]
_,loss= self.sess.run([optimizer, self.loss], feed_dict={self.input_u: users,self.input_i:items,self.r:ratings})
print self.foldInfo, "Epoch:", '%04d' % (epoch + 1), "Batch:", '%03d' % (i + 1), "loss=", "{:.9f}".format(loss)
#save the output layer
U_embedding, V_embedding = self.sess.run([self.user_out, self.item_out], feed_dict={self.input_u: users,self.input_i:items})
for ue,u in zip(U_embedding,u_idx):
self.U[u]=ue
for ve,v in zip(V_embedding,v_idx):
self.V[v]=ve
self.normalized_V = np.sqrt(np.sum(self.V * self.V, axis=1))
self.normalized_U = np.sqrt(np.sum(self.U * self.U, axis=1))
self.ranking_performance()
print("Optimization Finished!")
def predictForRanking(self, u):
'invoked to rank all the items for the user'
if self.data.containsUser(u):
uid = self.data.user[u]
return np.divide(self.V.dot(self.U[uid]),self.normalized_U[uid]*self.normalized_V)
else:
return [self.data.globalMean] * self.num_items
| Coder-Yu/RecQ | algorithm/ranking/DMF.py | Python | gpl-3.0 | 6,418 |
from list_node import ListNode
from instruction import Instruction, Subneg4Instruction
def sr_mult(WM, LM):
    """Emit a software multiply subroutine for a subneg4 machine.

    WM is the word/memory manager (constants, data words, labels,
    namespaces) and LM the list manager that owns the emitted
    instruction nodes.  The generated routine multiplies the words
    'arg1' and 'arg2' by repeated shift-and-add over 32 iterations,
    accumulating a hi/lo double-word result, then jumps through
    'ret_addr'.  Returns (LNs, a, b, ret_addr, lo).
    """
    namespace_bak = WM.getNamespace(string=False)
    WM.setNamespace(["sr","mult"])
    # Constants used by the sign handling and the 32-step loop.
    c_0 = WM.const(0)
    c_1 = WM.const(1)
    c_m1 = WM.const(-1)
    c_32 = WM.const(32)
    # Working storage: arguments, return address, scratch, loop counter,
    # the hi/lo result accumulator and the result-sign flag.
    a = WM.addDataWord(0, "arg1")
    b = WM.addDataWord(0, "arg2")
    ret_addr = WM.addDataPtrWord(0, "ret_addr")
    temp = WM.addDataWord(0, "temp")
    count = WM.addDataWord(0, "count")
    hi = WM.addDataWord(0, "hi")
    lo = WM.addDataWord(0, "lo")
    sign = WM.addDataWord(0, "sign")
    NEXT = WM.getNext()
    #HALT = WM.getHalt()
    # Each node is one subneg4 instruction: (src, dst, scratch, branch
    # target taken when the subtraction result is negative).
    LNs = (
        LM.new(ListNode("sr_mult", sys = True)),
        LM.new(ListNode(Subneg4Instruction(
            c_1.getPtr(),
            a.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L010")
            )), "sr_mult_start"),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            c_0.getPtr(),
            sign.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            c_m1.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L020")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            a.getPtr(),
            c_0.getPtr(),
            a.getPtr(),
            NEXT
            )),"sr_mult_L010"),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            c_1.getPtr(),
            sign.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_1.getPtr(),
            b.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L030")
            )),"sr_mult_L020"),
        LM.new(ListNode(Subneg4Instruction(
            c_32.getPtr(),
            c_0.getPtr(),
            count.getPtr(),
            WM.label("sr_mult_L050")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            b.getPtr(),
            c_0.getPtr(),
            b.getPtr(),
            NEXT
            )),"sr_mult_L030"),
        LM.new(ListNode(Subneg4Instruction(
            sign.getPtr(),
            c_m1.getPtr(),
            sign.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_32.getPtr(),
            c_0.getPtr(),
            count.getPtr(),
            NEXT
            )),"sr_mult_L040"),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            c_0.getPtr(),
            hi.getPtr(),
            NEXT
            )),"sr_mult_L050"),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            c_0.getPtr(),
            lo.getPtr(),
            NEXT
            ))),
        # Main loop (L100): shift hi:lo left one bit, add when the top
        # bit of 'a' is set, then shift 'a' and decrement the counter.
        LM.new(ListNode(Subneg4Instruction(
            hi.getPtr(),
            c_0.getPtr(),
            temp.getPtr(),
            NEXT
            )),"sr_mult_L100"),
        LM.new(ListNode(Subneg4Instruction(
            temp.getPtr(),
            hi.getPtr(),
            hi.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            lo.getPtr(),
            c_m1.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L110")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_m1.getPtr(),
            hi.getPtr(),
            hi.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            lo.getPtr(),
            c_0.getPtr(),
            temp.getPtr(),
            NEXT
            )),"sr_mult_L110"),
        LM.new(ListNode(Subneg4Instruction(
            temp.getPtr(),
            lo.getPtr(),
            lo.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            a.getPtr(),
            c_m1.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L800")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            b.getPtr(),
            c_0.getPtr(),
            temp.getPtr(),
            NEXT
            )),"sr_mult_L200"),
        LM.new(ListNode(Subneg4Instruction(
            temp.getPtr(),
            lo.getPtr(),
            lo.getPtr(),
            WM.label("sr_mult_L300")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            b.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L500")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            b.getPtr(),
            lo.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L500")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            c_m1.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L800")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            b.getPtr(),
            c_m1.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L800")
            )),"sr_mult_L300"),
        LM.new(ListNode(Subneg4Instruction(
            b.getPtr(),
            lo.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L800")
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_m1.getPtr(),
            hi.getPtr(),
            hi.getPtr(),
            NEXT
            )),"sr_mult_L500"),
        # L800: shift 'a' left (a += a) and loop while count > 0.
        LM.new(ListNode(Subneg4Instruction(
            a.getPtr(),
            c_0.getPtr(),
            temp.getPtr(),
            NEXT
            )),"sr_mult_L800"),
        LM.new(ListNode(Subneg4Instruction(
            temp.getPtr(),
            a.getPtr(),
            a.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_m1.getPtr(),
            count.getPtr(),
            count.getPtr(),
            WM.label("sr_mult_L100")
            ))),
        # L900/L990: apply the recorded sign by negating hi:lo when
        # needed, then return through ret_addr.
        LM.new(ListNode(Subneg4Instruction(
            sign.getPtr(),
            c_m1.getPtr(),
            temp.getPtr(),
            WM.label("sr_mult_L990")
            )),"sr_mult_L900"),
        LM.new(ListNode(Subneg4Instruction(
            lo.getPtr(),
            c_0.getPtr(),
            lo.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            hi.getPtr(),
            c_0.getPtr(),
            hi.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            hi.getPtr(),
            temp.getPtr(),
            NEXT
            )),"sr_mult_L990"),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            lo.getPtr(),
            temp.getPtr(),
            NEXT
            ))),
        LM.new(ListNode(Subneg4Instruction(
            c_0.getPtr(),
            c_m1.getPtr(),
            temp.getPtr(),
            ret_addr
            )))
        )
    WM.setNamespace(namespace_bak)
    return LNs, a, b, ret_addr, lo
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
class ADBaseCommand(BaseCommand):
    """Shared base for the ad-image management commands.

    Provides a common ``--overwrite`` option and a small URL validation
    helper built on Django's URLValidator.
    """
    option_list = BaseCommand.option_list + (
        make_option('--overwrite',
            action='store_true',
            dest='overwrite',
            default=False,
            help='Overwrite existed images'),
    )
    # One shared validator instance for all subclasses.
    url_validator = URLValidator()

    def valid_url(self, url):
        """Return True when *url* passes Django's URL validation."""
        try:
            self.url_validator(url)
        except ValidationError:
            return False
        return True

    def handle(self, *args, **options):
        """Announce overwrite mode; subclasses extend this entry point."""
        if options.get('overwrite'):
            self.stdout.write('==== Overwrite mode enabled, all of images will be re-download ===\n')
| xuqingkuang/tsCloud | tsCloud/ad/management/commands/_private.py | Python | gpl-3.0 | 841 |
# This tool convert KangXiZiDian djvu files to tiff files.
# Download djvu files: http://bbs.dartmouth.edu/~fangq/KangXi/KangXi.tar
# Character page info: http://wenq.org/unihan/Unihan.txt as kIRGKangXi field.
# Character seek position in Unihan.txt http://wenq.org/unihan/unihandata.txt
# DjVuLibre package provides the ddjvu tool.
# The 410 page is bad, but it should be blank page in fact. so just remove 410.tif
import os
import subprocess

if __name__ == "__main__":
    # Create the output directory portably; shelling out to `mkdir`
    # printed an error whenever the directory already existed.
    os.makedirs("tif", exist_ok=True)
    # Convert every page of the scanned dictionary (pages 1..1683).
    for page_number in range(1, 1683 + 1):
        page = str(page_number)
        print(page)
        # Pass an argument list instead of building a shell string:
        # no quoting pitfalls and no shell involved.
        subprocess.call(["ddjvu", "-format=tiff", "-page=" + page,
                         "-scale=100", "-quality=150",
                         "KangXiZiDian.djvu", "tif/" + page + ".tif"])
# Copyright (C) 2018-2022 Yannick Jadoul
#
# This file is part of Parselmouth.
#
# Parselmouth is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Parselmouth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Parselmouth. If not, see <http://www.gnu.org/licenses/>
import pytest
import pytest_lazyfixture
import parselmouth
def combined_fixture(*args, **kwargs):
	"""Build a pytest fixture parametrized over other fixtures.

	Each name in *args* is wrapped with pytest_lazyfixture.lazy_fixture
	so the referenced fixture is resolved per parameter; the fixture
	names double as the test IDs.
	"""
	lazy_params = [pytest_lazyfixture.lazy_fixture(name) for name in args]
	return pytest.fixture(params=lazy_params, ids=args, **kwargs)
@pytest.fixture
def sound_path(resources):
	# Path of the WAV recording shared by the audio-based fixtures below.
	yield resources["the_north_wind_and_the_sun.wav"]
@pytest.fixture
def sound(sound_path):
	# Sound object read from the WAV resource via parselmouth.read.
	yield parselmouth.read(sound_path)
@pytest.fixture
def intensity(sound):
	# Intensity analysis derived from the sound fixture.
	yield sound.to_intensity()
@pytest.fixture
def pitch(sound):
	# Pitch analysis derived from the sound fixture.
	yield sound.to_pitch()
@pytest.fixture
def spectrogram(sound):
	# Spectrogram derived from the sound fixture.
	yield sound.to_spectrogram()
@combined_fixture('intensity', 'pitch', 'spectrogram', 'sound')
def sampled(request):
	# Parametrized over all of the time-sampled object fixtures above.
	yield request.param
@combined_fixture('sampled')
def thing(request):
	# Any Praat data object; currently only the sampled ones.
	yield request.param
@pytest.fixture
def text_grid_path(resources):
	# Path of the TextGrid annotation resource.
	yield resources["the_north_wind_and_the_sun.TextGrid"]
@pytest.fixture
def text_grid(text_grid_path):
	# TextGrid object read from the annotation resource.
	yield parselmouth.read(text_grid_path)
@pytest.fixture
def script_path(resources):
	# Path of a sample Praat script resource.
	yield resources["script.praat"]
| YannickJadoul/Parselmouth | tests/resource_fixtures.py | Python | gpl-3.0 | 1,735 |
#!/usr/bin/python3
# Copyright 2017-2021 Joel Allen Luellwitz and Emily Frost
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The project test runner. To run the test suite, execute the following from the project
root:
CLEAR_GPG_AGENT_CACHE=true python3 -m unittest
"""
__author__ = 'Joel Luellwitz and Emily Frost'
__version__ = '0.8'
import unittest
from test.gpgmailbuildertest import GpgMailBuilderTest
if __name__ == '__main__':
    # Run the imported test cases when this module is executed directly
    # (equivalent to `python -m unittest`).
    unittest.main()
| park-bench/gpgmailer | test/__init__.py | Python | gpl-3.0 | 1,050 |
# vim: set fileencoding=utf-8 :
# GNU Solfege - free ear training software
# Copyright (C) 2009, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
import os
import StringIO
import subprocess
from gi.repository import Gdk, Gtk
from solfege.esel import SearchView
if __name__ == '__main__':
    # Standalone run (developing this module by itself): set up C-locale
    # translations and create the statistics database the editor expects
    # before the solfege package is imported below.
    from solfege import i18n
    i18n.setup(".", "C")
    import solfege.statistics
    solfege.db = solfege.statistics.DB()
import solfege
from solfege import cfg
from solfege import filesystem
from solfege import gu
from solfege import frontpage as pd
from solfege import lessonfile
from solfege import osutils
class LessonFilePreviewWidget(Gtk.VBox):
    """File-chooser preview pane showing a lesson file's title, module
    and the front-page topics that reference it.

    NOTE(review): self.g_ok_button is not created here — the owning
    dialog (SelectLessonFileDialog) attaches the dialog's OK button to
    this widget after construction; update() toggles its sensitivity.
    """
    def __init__(self, model):
        # model: front-page model used to look up topics per file.
        Gtk.VBox.__init__(self)
        self.m_model = model
        self.set_size_request(200, 200)
        l = Gtk.Label()
        l.set_alignment(0.0, 0.5)
        l.set_markup("<b>Title:</b>")
        self.pack_start(l, False, False, 0)
        self.g_title = Gtk.Label()
        self.g_title.set_alignment(0.0, 0.5)
        self.pack_start(self.g_title, False, False, 0)
        l = Gtk.Label()
        l.set_alignment(0.0, 0.5)
        l.set_markup("<b>Module:</b>")
        self.pack_start(l, False, False, 0)
        self.g_module = Gtk.Label()
        self.g_module.set_alignment(0.0, 0.5)
        self.pack_start(self.g_module, False, False, 0)
        l = Gtk.Label()
        l.set_alignment(0.0, 0.5)
        l.set_markup("<b>Used in topcis:</b>")
        self.pack_start(l, False, False, 0)
        self.g_topic_box = Gtk.VBox()
        self.pack_start(self.g_topic_box, False, False, 0)
        self.show_all()
    def update(self, dlg):
        # 'selection-changed' callback: refresh the preview labels from
        # the chooser's currently highlighted file.
        fn = dlg.get_preview_filename()
        if fn:
            fn = gu.decode_filename(fn)
            for child in self.g_topic_box.get_children():
                child.destroy()
            fn = lessonfile.mk_uri(fn)
            try:
                self.set_sensitive(True)
                self.g_title.set_text(lessonfile.infocache.get(fn, 'title'))
                self.g_module.set_text(lessonfile.infocache.get(fn, 'module'))
                self.g_ok_button.set_sensitive(True)
                for x in self.m_model.iterate_topics_for_file(fn):
                    l = Gtk.Label(label=x)
                    l.set_alignment(0.0, 0.5)
                    self.g_topic_box.pack_start(l, False, False, 0)
                # Show a dash when no topic references the file.
                if not self.g_topic_box.get_children():
                    l = Gtk.Label(label=u"-")
                    l.set_alignment(0.0, 0.5)
                    self.g_topic_box.pack_start(l, False, False, 0)
            except (lessonfile.InfoCache.FileNotFound,
                    lessonfile.InfoCache.FileNotLessonfile), e:
                # Not a lesson file: clear the preview and block OK.
                self.g_title.set_text(u'')
                self.g_module.set_text(u'')
                self.g_ok_button.set_sensitive(False)
                self.set_sensitive(False)
        self.show_all()
        return True
class SelectLessonFileDialog(Gtk.FileChooserDialog):
    """Multi-select lesson-file chooser with a live preview pane.

    The preview widget gets the dialog's OK button (as g_ok_button) so
    it can disable OK while a non-lesson file is highlighted.
    """
    def __init__(self, parent):
        Gtk.FileChooserDialog.__init__(self, _("Select lesson file"),
            parent=parent,
            buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,))
        self.set_select_multiple(True)
        pv = LessonFilePreviewWidget(parent.m_model)
        pv.g_ok_button = self.add_button("gtk-ok", Gtk.ResponseType.OK)
        pv.g_ok_button.set_sensitive(False)
        pv.show()
        self.set_preview_widget(pv)
        self.connect('selection-changed', pv.update)
class SelectLessonfileBySearchDialog(Gtk.Dialog):
    """Dialog that picks exercises through the search view.

    Clicking a result stores its filename in self.m_filename and emits
    ResponseType.OK, so the caller can run the dialog in a loop to add
    several exercises.
    """
    def __init__(self):
        Gtk.Dialog.__init__(self, buttons=(Gtk.STOCK_CLOSE, Gtk.ResponseType.ACCEPT))
        view = SearchView(_('Search for exercises. Each exercise you click will be added to the section of the front page.'),
            fields=['link-with-filename-tooltip', 'module'])
        view.on_link_clicked = self.on_link_clicked
        self.vbox.pack_start(view, True, True, 0)
        self.show_all()
    def on_link_clicked(self, widget, filename):
        # Hand the clicked filename back to the dialog's caller.
        self.m_filename = filename
        self.response(Gtk.ResponseType.OK)
def editor_of(obj):
    """
    Return the toplevel page, the one that is a Editor object.
    """
    node = obj
    # Walk up the m_parent chain until the Editor toplevel is reached.
    while not isinstance(node, Editor):
        node = node.m_parent
    return node
def parent_page(obj):
    """
    Return the parent page of obj. Return None if this is the toplevel page.
    """
    _missing = object()
    node = obj
    while True:
        # Walk one step up; a missing m_parent attribute means we fell
        # off the top of the hierarchy.
        node = getattr(node, 'm_parent', _missing)
        if node is _missing or node is None:
            return None
        if isinstance(node, Page):
            return node
class Section(Gtk.VBox):
"""
A section consists of a heading and a list of links.
self.g_link_box is a vbox that contains the links.
"""
def __init__(self, model, parent):
Gtk.VBox.__init__(self)
self.m_model = model
self.m_parent = parent
assert isinstance(model, pd.LinkList)
hbox = Gtk.HBox()
hbox.set_spacing(6)
self.pack_start(hbox, False, False, 0)
# This is displayed and used when we edit the heading
self.g_heading_entry = Gtk.Entry()
self.g_heading_entry.set_no_show_all(True)
hbox.pack_start(self.g_heading_entry, True, True, 0)
self.g_heading = Gtk.Label()
self.g_heading.set_alignment(0.0, 0.5)
# FIXME escape m_name
self.g_heading.set_markup("<b>%s</b>" % model.m_name)
hbox.pack_start(self.g_heading, False, False, 0)
###
button_hbox = Gtk.HBox()
button_hbox.set_spacing(0)
hbox.pack_start(button_hbox, False, False, 0)
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_EDIT, Gtk.IconSize.MENU)
button = Gtk.Button()
button.add(im)
button.connect('clicked', self.on_edit_heading)
button_hbox.pack_start(button, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_ADD, Gtk.IconSize.MENU)
button = Gtk.Button()
button.add(im)
button.connect('button-release-event', self.on_add)
button_hbox.pack_start(button, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_REMOVE, Gtk.IconSize.MENU)
button = Gtk.Button()
button.add(im)
button.connect('button-release-event', self.on_remove)
button_hbox.pack_start(button, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_CUT, Gtk.IconSize.MENU)
b = Gtk.Button()
b.add(im)
b.connect('clicked', self.on_cut)
button_hbox.pack_start(b, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_PASTE, Gtk.IconSize.MENU)
b = Gtk.Button()
b.add(im)
b.connect('clicked', self.on_paste, -1)
Editor.clipboard.register_paste_button(b, (pd.LinkList, pd.Page, unicode))
button_hbox.pack_start(b, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_GO_DOWN, Gtk.IconSize.MENU)
self.g_move_down_btn = Gtk.Button()
self.g_move_down_btn.add(im)
self.g_move_down_btn.connect('clicked',
self.m_parent.move_section_down, self)
button_hbox.pack_start(self.g_move_down_btn, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_GO_UP, Gtk.IconSize.MENU)
self.g_move_up_btn = Gtk.Button()
self.g_move_up_btn.add(im)
self.g_move_up_btn.connect('clicked',
self.m_parent.move_section_up, self)
button_hbox.pack_start(self.g_move_up_btn, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_GO_BACK, Gtk.IconSize.MENU)
self.g_move_left_btn = Gtk.Button()
self.g_move_left_btn.add(im)
self.g_move_left_btn.connect('clicked',
parent.m_parent.on_move_section_left, self)
button_hbox.pack_start(self.g_move_left_btn, False, False, 0)
###
im = Gtk.Image()
im.set_from_stock(Gtk.STOCK_GO_FORWARD, Gtk.IconSize.MENU)
self.g_move_right_btn = Gtk.Button()
self.g_move_right_btn.add(im)
self.g_move_right_btn.connect('clicked',
parent.m_parent.on_move_section_right, self)
button_hbox.pack_start(self.g_move_right_btn, False, False, 0)
###
self.g_link_box = Gtk.VBox()
self.pack_start(self.g_link_box, False, False, 0)
for link in self.m_model:
self.g_link_box.pack_start(self.create_linkrow(link), True, True, 0)
# The button to click to add a new link
hbox = Gtk.HBox()
self.pack_start(hbox, True, True, 0)
def on_edit_heading(self, btn):
self.g_heading_entry.set_text(self.m_model.m_name)
self.g_heading_entry.show()
self.g_heading.hide()
self.g_heading_entry.grab_focus()
def finish_edit(entry):
self.g_heading_entry.disconnect(sid)
self.g_heading_entry.disconnect(keyup_id)
self.g_heading_entry.disconnect(keydown_sid)
self.m_model.m_name = entry.get_text()
self.g_heading.set_markup(u"<b>%s</b>" % entry.get_text())
self.g_heading_entry.hide()
self.g_heading.show()
sid = self.g_heading_entry.connect('activate', finish_edit)
def keydown(entry, event):
if event.keyval == Gdk.KEY_Tab:
finish_edit(entry)
keydown_sid = self.g_heading_entry.connect('key-press-event', keydown)
def keyup(entry, event):
if event.keyval == Gdk.KEY_Escape:
self.g_heading_entry.disconnect(sid)
self.g_heading_entry.disconnect(keyup_id)
self.g_heading_entry.hide()
self.g_heading.show()
return True
keyup_id = self.g_heading_entry.connect('key-release-event', keyup)
def on_add(self, btn, event):
menu = Gtk.Menu()
item = Gtk.MenuItem(_("Add link to new page"))
item.connect('activate', self.on_add_link_to_new_page)
menu.append(item)
item = Gtk.MenuItem(_("Add link to exercise"))
item.connect('activate', self.on_add_link)
menu.append(item)
item = Gtk.MenuItem(_("Add link by searching for exercises"))
item.connect('activate', self.on_add_link_by_search)
menu.append(item)
menu.show_all()
menu.popup(None, None, None, None, event.button, event.time)
def on_remove(self, btn, event):
self.m_parent.remove_section(self)
def on_add_link_by_search(self, btn):
dlg = SelectLessonfileBySearchDialog()
while True:
ret = dlg.run()
if ret == Gtk.ResponseType.OK:
self._add_filenames([os.path.abspath(lessonfile.uri_expand(dlg.m_filename))])
else:
break
dlg.destroy()
def on_add_link(self, btn):
if editor_of(self).m_filename:
open_dir = os.path.split(editor_of(self).m_filename)[0]
else:
open_dir = filesystem.user_data()
dlg = SelectLessonFileDialog(editor_of(self))
dlg.set_current_folder(open_dir)
while 1:
ret = dlg.run()
if ret in (Gtk.ResponseType.REJECT, Gtk.ResponseType.DELETE_EVENT, Gtk.ResponseType.CANCEL):
break
else:
assert ret == Gtk.ResponseType.OK
self._add_filenames(dlg.get_filenames())
break
dlg.destroy()
def _add_filenames(self, filenames):
for filename in filenames:
fn = gu.decode_filename(filename)
assert os.path.isabs(fn)
# If the file name is a file in a subdirectory below
# lessonfile.exercises_dir in the current working directory,
# then the file is a standard lesson file, and it will be
# converted to a uri scheme with:
fn = lessonfile.mk_uri(fn)
# Small test to check that the file actually is a lesson file.
try:
lessonfile.infocache.get(fn, 'title')
except lessonfile.infocache.FileNotLessonfile:
continue
self.m_model.append(fn)
self.g_link_box.pack_start(self.create_linkrow(fn, True, True, 0), False)
def on_add_link_to_new_page(self, menuitem):
page = pd.Page(_("Untitled%s") % "", [pd.Column()])
self.m_model.append(page)
self.g_link_box.pack_start(self.create_linkrow(page, True, True, 0))
def create_linkrow(self, link_this):
hbox = Gtk.HBox()
def ff(btn, page):
if id(page) in editor_of(self).m_page_mapping:
editor_of(self).show_page_id(id(page))
else:
if not page[0]:
page[0].append(pd.LinkList(link_this.m_name))
p = Page(page, parent_page(self))
p.show()
editor_of(self).add_page(p)
if isinstance(link_this, pd.Page):
linkbutton = gu.ClickableLabel(link_this.m_name)
linkbutton.connect('clicked', ff, link_this)
else:
try:
linkbutton = gu.ClickableLabel(lessonfile.infocache.get(link_this, 'title'))
linkbutton.set_tooltip_text(link_this)
except lessonfile.InfoCache.FileNotFound:
linkbutton = gu.ClickableLabel(_(u"«%s» was not found") % link_this)
linkbutton.make_warning()
hbox.pack_start(linkbutton, True, True, 0)
linkbutton.connect('button-press-event', self.on_right_click_row, link_this)
hbox.show_all()
return hbox
def on_right_click_row(self, button, event, linked):
idx = self.m_model.index(linked)
if event.button == 3:
m = Gtk.Menu()
item = Gtk.ImageMenuItem(Gtk.STOCK_DELETE)
item.connect('activate', self.on_delete_link, linked)
###
m.append(item)
item = Gtk.ImageMenuItem(Gtk.STOCK_CUT)
item.connect('activate', self.on_cut_link, idx)
###
m.append(item)
item = Gtk.ImageMenuItem(Gtk.STOCK_PASTE)
item.set_sensitive(bool(Editor.clipboard))
item.connect('activate', self.on_paste, idx)
###
m.append(item)
item = Gtk.ImageMenuItem(Gtk.STOCK_EDIT)
item.connect('activate', self.on_edit_linktext, linked)
item.set_sensitive(bool(not isinstance(linked, basestring)))
###
m.append(item)
item = Gtk.ImageMenuItem(Gtk.STOCK_GO_UP)
item.connect('activate', self.on_move_link_up, idx)
item.set_sensitive(bool(idx > 0))
###
m.append(item)
item = Gtk.ImageMenuItem(Gtk.STOCK_GO_DOWN)
item.connect('activate', self.on_move_link_down, idx)
item.set_sensitive(bool(idx < len(self.m_model) - 1))
###
m.append(item)
item = Gtk.ImageMenuItem(Gtk.STOCK_EDIT)
item.set_sensitive(isinstance(linked, unicode))
item.connect('activate', self.on_edit_file, idx)
###
m.append(item)
m.show_all()
m.popup(None, None, None, None, event.button, event.time)
return True
def on_delete_link(self, menuitem, linked):
idx = self.m_model.index(linked)
if id(linked) in editor_of(self).m_page_mapping:
editor_of(self).destroy_window(id(linked))
self.g_link_box.get_children()[idx].destroy()
del self.m_model[idx]
def on_edit_linktext(self, menuitem, linked):
idx = self.m_model.index(linked)
# row is the hbox containing the linkbutton
row = self.g_link_box.get_children()[idx]
linkbutton = row.get_children()[0]
entry = Gtk.Entry()
entry.set_text(linkbutton.get_label())
row.pack_start(entry, True, True, 0)
linkbutton.hide()
entry.show()
entry.grab_focus()
def finish_edit(entry):
linkbutton.set_label(entry.get_text().decode("utf-8"))
linkbutton.get_children()[0].set_alignment(0.0, 0.5)
linkbutton.show()
self.m_model[idx].m_name = entry.get_text().decode("utf-8")
entry.destroy()
sid = entry.connect('activate', finish_edit)
def keydown(entry, event):
if event.keyval == Gdk.KEY_Tab:
finish_edit(entry)
entry.connect('key-press-event', keydown)
def keyup(entry, event):
if event.keyval == Gdk.KEY_Escape:
linkbutton.show()
entry.disconnect(sid)
entry.destroy()
return True
entry.connect('key-release-event', keyup)
def on_edit_file(self, menuitem, linked):
try:
try:
subprocess.call((cfg.get_string("programs/text-editor"),
lessonfile.uri_expand(self.m_model[linked])))
except OSError, e:
raise osutils.BinaryForProgramException("Text editor", cfg.get_string("programs/text-editor"), e)
except osutils.BinaryForProgramException, e:
solfege.win.display_error_message2(e.msg1, e.msg2)
def on_cut(self, btn):
self.m_parent.cut_section(self)
def on_cut_link(self, menuitem, idx):
Editor.clipboard.append(self.m_model[idx])
del self.m_model[idx]
self.g_link_box.get_children()[idx].destroy()
def on_paste(self, btn, idx):
assert Editor.clipboard, "Paste buttons should be insensitive when the clipboard is empty."
pobj = Editor.clipboard.pop()
if isinstance(pobj, pd.LinkList):
mobj = pd.Page(pobj.m_name, [pd.Column(pobj)])
else:
mobj = pobj
if idx == -1:
self.m_model.append(mobj)
self.g_link_box.pack_start(self.create_linkrow(mobj, True, True, 0))
else:
self.m_model.insert(idx, mobj)
row = self.create_linkrow(mobj)
self.g_link_box.pack_start(row, True, True, 0)
self.g_link_box.reorder_child(row, idx)
def on_move_link_up(self, btn, idx):
"""
Move the link one row up.
"""
assert idx > 0
self.m_model[idx], self.m_model[idx - 1] = self.m_model[idx - 1], self.m_model[idx]
self.g_link_box.reorder_child(self.g_link_box.get_children()[idx], idx - 1)
def on_move_link_down(self, btn, idx=None):
"""
Move the link one row down.
"""
self.m_model[idx], self.m_model[idx + 1] = self.m_model[idx + 1], self.m_model[idx]
self.g_link_box.reorder_child(self.g_link_box.get_children()[idx], idx + 1)
class Column(Gtk.VBox):
    """One column of a front-page Page: a vertical stack of Section
    widgets plus 'Add section' and 'Paste' buttons at the bottom."""
    def __init__(self, model, parent):
        # model: a pd.Column; parent: the Page widget owning this column.
        Gtk.VBox.__init__(self)
        self.set_spacing(gu.hig.SPACE_MEDIUM)
        self.m_model = model
        self.m_parent = parent
        assert isinstance(model, pd.Column)
        self.g_section_box = Gtk.VBox()
        self.g_section_box.set_spacing(gu.hig.SPACE_MEDIUM)
        self.pack_start(self.g_section_box, False, False, 0)
        for section in model:
            assert isinstance(section, pd.LinkList)
            gui_section = Section(section, self)
            self.g_section_box.pack_start(gui_section, False, False, 0)
        hbox = Gtk.HBox()
        self.pack_start(hbox, False, False, 0)
        b = Gtk.Button(_("Add section"))
        hbox.pack_start(b, False, False, 0)
        b.connect('clicked', self.on_add_section)
        b = Gtk.Button(stock=Gtk.STOCK_PASTE)
        b.connect('clicked', self.on_paste)
        # Only enabled while the clipboard holds a LinkList.
        Editor.clipboard.register_paste_button(b, pd.LinkList)
        hbox.pack_start(b, False, False, 0)
    def __del__(self):
        logging.debug("Column.__del__")
    def cut_section(self, section):
        # Move the section (model and widget) to the editor clipboard.
        idx = self.g_section_box.get_children().index(section)
        Editor.clipboard.append(self.m_model[idx])
        del self.m_model[idx]
        self.g_section_box.get_children()[idx].destroy()
    def remove_section(self, section):
        # Delete the section outright (not placed on the clipboard).
        idx = self.g_section_box.get_children().index(section)
        del self.m_model[idx]
        self.g_section_box.get_children()[idx].destroy()
    def on_add_section(self, btn):
        # We write "Untitled%s" % "" instead of just "Untitled" here
        # since "Untitled%s" is already translated in many languages.
        section = pd.LinkList(_("Untitled%s" % ""))
        self.m_model.append(section)
        gui_section = Section(section, self)
        self.g_section_box.pack_start(gui_section, False, False, 0)
        gui_section.show_all()
    def move_section_down(self, widget, section):
        # Swap the section with its successor, widget and model in step.
        idx = self.g_section_box.get_children().index(section)
        if idx < len(self.g_section_box.get_children()) - 1:
            self.g_section_box.reorder_child(section, idx + 1)
            self.m_model[idx], self.m_model[idx + 1] \
                = self.m_model[idx + 1], self.m_model[idx]
            self.m_parent.update_buttons()
    def move_section_up(self, widget, section):
        # Swap the section with its predecessor, widget and model in step.
        idx = self.g_section_box.get_children().index(section)
        if idx > 0:
            self.g_section_box.reorder_child(section, idx - 1)
            self.m_model[idx], self.m_model[idx - 1] \
                = self.m_model[idx - 1], self.m_model[idx]
            self.m_parent.update_buttons()
    def on_paste(self, widget):
        """
        Paste the clipboard as a new section to this column.
        """
        assert Editor.clipboard, "Paste buttons should be insensitive when the clipboard is empty."
        assert isinstance(Editor.clipboard[-1], pd.LinkList)
        pobj = Editor.clipboard.pop()
        self.m_model.append(pobj)
        sect = Section(pobj, self)
        sect.show_all()
        self.g_section_box.pack_start(sect, False, False, 0)
class Page(Gtk.VBox):
    """One editor page: a scrollable horizontal row of Column widgets
    mirroring a pd.Page model."""
    def __init__(self, model, parent):
        # model: a pd.Page (list of pd.Column); parent: the parent Page
        # in the hierarchy, or None for the toplevel.
        Gtk.VBox.__init__(self)
        self.m_model = model
        self.m_parent = parent
        sc = Gtk.ScrolledWindow()
        sc.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.pack_start(sc, True, True, 0)
        self.g_column_box = Gtk.HBox()
        self.g_column_box.set_spacing(gu.hig.SPACE_LARGE)
        self.g_column_box.set_border_width(gu.hig.SPACE_SMALL)
        # We pack column into this box
        sc.add_with_viewport(self.g_column_box)
        self.show_all()
        if model:
            self.update_from_model()

    def __del__(self):
        # Bug fix: logging.debug was given an extra argument without a
        # %s placeholder, producing a formatting error when emitted.
        logging.debug("Page.__del__: %s", self.m_model.m_name)

    def on_add_column(self, *btn):
        """Append an empty column to both model and widget tree."""
        column = pd.Column()
        self.m_model.append(column)
        gcol = Column(column, self)
        gcol.show_all()
        self.g_column_box.pack_start(gcol, True, True, 0)

    def on_move_section_left(self, button, section):
        """Move a section to the previous column; drop a trailing empty column."""
        column_idx = self.g_column_box.get_children().index(section.m_parent)
        section_idx = section.m_parent.g_section_box.get_children().index(section)
        if column_idx > 0:
            to_column = self.g_column_box.get_children()[column_idx - 1]
            section.reparent(to_column.g_section_box)
            section.m_parent = to_column
            to_column.g_section_box.set_child_packing(section, False, False, 0, Gtk.PACK_START)
            self.m_model[column_idx - 1].append(self.m_model[column_idx][section_idx])
            del self.m_model[column_idx][section_idx]
            # Remove the right-most column if we moved the
            # last section out of it.
            if not self.g_column_box.get_children()[-1].g_section_box.get_children():
                assert len(self.m_model[-1]) == 0
                del self.m_model[-1]
                self.g_column_box.get_children()[-1].destroy()
            self.update_buttons()

    def on_move_section_right(self, button, section):
        """Move a section to the next column, creating it if necessary."""
        # the column we move from
        column_idx = self.g_column_box.get_children().index(section.m_parent)
        section_idx = section.m_parent.g_section_box.get_children().index(section)
        if column_idx == len(self.g_column_box.get_children()) - 1:
            self.on_add_column()
        to_column = self.g_column_box.get_children()[column_idx + 1]
        section.reparent(to_column.g_section_box)
        section.m_parent = to_column
        to_column.g_section_box.set_child_packing(section, False, False, 0, Gtk.PACK_START)
        self.m_model[column_idx + 1].append(self.m_model[column_idx][section_idx])
        del self.m_model[column_idx][section_idx]
        self.update_buttons()

    def update_from_model(self):
        """Rebuild all Column widgets from the model."""
        for child in self.g_column_box.get_children():
            child.destroy()
        for column in self.m_model:
            self.g_column_box.pack_start(Column(column, self), False, False, 0)
        self.g_column_box.show_all()
        self.update_buttons()

    def update_buttons(self):
        """Set the sensitivity of every section's four move buttons."""
        num_cols = len(self.g_column_box.get_children())
        for col_idx, column in enumerate(self.g_column_box.get_children()):
            num_sects = len(column.g_section_box.get_children())
            for sect_idx, section in enumerate(column.g_section_box.get_children()):
                section.g_move_up_btn.set_sensitive(sect_idx != 0)
                section.g_move_down_btn.set_sensitive(sect_idx != num_sects -1)
                section.g_move_left_btn.set_sensitive(col_idx != 0)
                # Disallow moving right out of the last column while an
                # empty column already exists.
                if [col for col in self.g_column_box.get_children() if not col.g_section_box.get_children()] and col_idx == num_cols - 1:
                    section.g_move_right_btn.set_sensitive(False)
                else:
                    section.g_move_right_btn.set_sensitive(True)
class Clipboard(list):
    """
    A list subclass whose registered "paste" buttons are kept in sync with
    its contents: a button is sensitive only while the clipboard is
    non-empty and its top item is an instance of the button's accepted
    types.
    """
    def __init__(self, v=None):
        # BUG FIX: the original called list.__init__(v), which initialised
        # the *argument* instead of this instance, silently dropping any
        # initial content. Also avoid the mutable default argument.
        list.__init__(self, v if v is not None else [])
        self.m_paste_buttons = []
    def pop(self, i=-1):
        """Remove and return an item, then refresh button sensitivity."""
        ret = list.pop(self, i)
        self.update_buttons()
        return ret
    def append(self, obj):
        """Append an item, then refresh button sensitivity."""
        list.append(self, obj)
        self.update_buttons()
    def register_paste_button(self, button, accepts_types):
        """Track *button*, initialising its sensitivity from the top item."""
        button.set_sensitive(bool(self) and isinstance(self[-1], accepts_types))
        self.m_paste_buttons.append((button, accepts_types))
    def update_buttons(self):
        """Enable each registered button iff the top item matches its types."""
        for button, types in self.m_paste_buttons:
            button.set_sensitive(bool(self) and isinstance(self[-1], types))
class Editor(Gtk.Window, gu.EditorDialogBase):
    """
    Top-level window for editing Solfege front-page files.

    Pages are displayed one at a time inside g_main_box; m_page_mapping
    maps id(page-model) -> Page widget so sub-pages can be revisited.
    """
    savedir = os.path.join(filesystem.user_data(), u'exercises', u'user')
    # The clipboard will be shared between all Editor instances
    clipboard = Clipboard()
    def __init__(self, filename=None):
        """Create the editor; load `filename` if given, else a new model."""
        Gtk.Window.__init__(self)
        logging.debug("fpeditor.Editor.__init__(%s)", filename)
        gu.EditorDialogBase.__init__(self, filename)
        self.set_default_size(800, 600)
        self.g_main_box = Gtk.VBox()
        self.add(self.g_main_box)
        self.g_actiongroup.add_actions([
            ('GoBack', Gtk.STOCK_GO_BACK, None, None, None, self.go_back),
        ])
        self.setup_toolbar()
        self.g_title_hbox = Gtk.HBox()
        self.g_title_hbox.set_spacing(gu.hig.SPACE_SMALL)
        self.g_title_hbox.set_border_width(gu.hig.SPACE_SMALL)
        label = Gtk.Label()
        label.set_markup(u"<b>%s</b>" % _("Front page title:"))
        self.g_title_hbox.pack_start(label, False, False, 0)
        self.g_fptitle = Gtk.Entry()
        self.g_title_hbox.pack_start(self.g_fptitle, True, True, 0)
        self.g_main_box.pack_start(self.g_title_hbox, False, False, 0)
        # This dict maps the windows created for all pages belonging to
        # the file.
        self.m_page_mapping = {}
        self.m_model = None
        if filename:
            self.load_file(filename)
        else:
            self.m_model = pd.Page(_("Untitled%s") % self.m_instance_number,
                pd.Column())
            self.set_not_modified()
        self.add_page(Page(self.m_model, self))
        self.clipboard.update_buttons()
        self.show_all()
        self.add_to_instance_dict()
        self.g_fptitle.set_text(self.m_model.m_name)
        self.g_fptitle.connect('changed', self.on_frontpage_title_changed)
    def __del__(self):
        logging.debug("fpeditor.Editor.__del__, filename=%s", self.m_filename)
    def add_page(self, page):
        """
        Add and show the page.
        """
        editor_of(self).m_page_mapping[id(page.m_model)] = page
        self.g_main_box.pack_start(page, True, True, 0)
        self.show_page(page)
    def show_page_id(self, page_id):
        # Look up the Page widget registered for this model id and show it.
        self.show_page(self.m_page_mapping[page_id])
    def show_page(self, page):
        """
        Hide the currently visible page, and show PAGE instead.
        """
        try:
            self.g_visible_page.hide()
        except AttributeError:
            # No page has been shown yet.
            pass
        self.g_visible_page = page
        page.show()
        # The front-page title entry is only relevant on the top-level page.
        if isinstance(page.m_parent, Page):
            self.g_title_hbox.hide()
        else:
            self.g_title_hbox.show()
        self.g_ui_manager.get_widget("/Toolbar/GoBack").set_sensitive(
            not isinstance(self.g_visible_page.m_parent, Editor))
    def go_back(self, *action):
        # Navigate one level up in the page hierarchy.
        self.show_page(self.g_visible_page.m_parent)
    def on_frontpage_title_changed(self, widget):
        # Keep the model's name in sync with the title entry.
        self.m_model.m_name = widget.get_text()
    def setup_toolbar(self):
        """Build the toolbar and accelerators from an inline UI definition."""
        self.g_ui_manager.insert_action_group(self.g_actiongroup, 0)
        uixml = """
<ui>
<toolbar name='Toolbar'>
<toolitem action='GoBack'/>
<toolitem action='New'/>
<toolitem action='Open'/>
<toolitem action='Save'/>
<toolitem action='SaveAs'/>
<toolitem action='Close'/>
<toolitem action='Help'/>
</toolbar>
<accelerator action='Close'/>
<accelerator action='New'/>
<accelerator action='Open'/>
<accelerator action='Save'/>
</ui>
"""
        self.g_ui_manager.add_ui_from_string(uixml)
        toolbar = self.g_ui_manager.get_widget("/Toolbar")
        self.g_main_box.pack_start(toolbar, False, False, 0)
        self.g_main_box.reorder_child(toolbar, 0)
        self.g_ui_manager.get_widget("/Toolbar").set_style(Gtk.ToolbarStyle.BOTH)
    def destroy_window(self, window_id):
        """
        Destroy the window with the id 'windowid' and all subwindows.
        """
        def do_del(wid):
            # Recursively destroy pages whose parent page is `wid`.
            for key in self.m_page_mapping:
                parent = parent_page(self.m_page_mapping[key])
                if id(parent) == wid:
                    do_del(key)
            editor_of(self).m_page_mapping[wid].destroy()
            del editor_of(self).m_page_mapping[wid]
        do_del(window_id)
    @staticmethod
    def edit_file(fn):
        """Open `fn` in a new editor, or present an existing one for it."""
        if fn in Editor.instance_dict:
            Editor.instance_dict[fn].present()
        else:
            try:
                win = Editor(fn)
                win.show()
            except IOError, e:
                gu.dialog_ok(_("Loading file '%(filename)s' failed: %(msg)s") %
                    {'filename': fn, 'msg': str(e).decode('utf8', 'replace')})
    def load_file(self, filename):
        """
        Load a file into a empty, newly created Editor object.
        """
        assert self.m_model == None
        self.m_model = pd.load_tree(filename, C_locale=True)
        self.m_filename = filename
        #
        if not os.path.isabs(filename):
            if not os.access(filename, os.W_OK):
                m = Gtk.MessageDialog(self, Gtk.DialogFlags.MODAL, Gtk.MessageType.INFO,
                    Gtk.ButtonsType.CLOSE, _("The front page file is write protected in your install. This is normal. If you want to edit a front page file, you have to select one of the files stored in .solfege/exercises/*/ in your home directory."))
                m.run()
                m.destroy()
        self.set_not_modified()
        self.set_title(self.m_filename)
    def set_not_modified(self):
        """
        Store the current state of the data in self.m_orig_dump so that
        is_modified() will return False until we make new changes.
        """
        io = StringIO.StringIO()
        self.m_model.dump(io)
        self.m_orig_dump = io.getvalue()
    def is_modified(self):
        """
        Return True if the data has changed since the last call to
        set_not_modified()
        """
        io = StringIO.StringIO()
        self.m_model.dump(io)
        s = io.getvalue()
        return s != self.m_orig_dump
    @property
    def m_changed(self):
        # Alias kept for the EditorDialogBase interface.
        return self.is_modified()
    def save(self, w=None):
        """Serialize the model to self.m_filename and mark it unmodified."""
        assert self.m_filename
        # NOTE(review): save_location is computed but never used.
        save_location = os.path.split(self.m_filename)[0] + os.sep
        fh = pd.FileHeader(1, self.m_model)
        fh.save_file(self.m_filename)
        self.set_not_modified()
        # We do test for solfege.win since it is not available during testing
        if hasattr(solfege, 'win'):
            solfege.win.load_frontpage()
    def on_show_help(self, *w):
        # No help page exists for the front-page editor.
        return
    def get_save_as_dialog(self):
        """Return the base save-as dialog extended with a location warning."""
        dialog = gu.EditorDialogBase.get_save_as_dialog(self)
        ev2 = Gtk.EventBox()
        ev2.set_name("DIALOGWARNING2")
        ev = Gtk.EventBox()
        ev.set_border_width(gu.hig.SPACE_SMALL)
        ev2.add(ev)
        ev.set_name("DIALOGWARNING")
        label = Gtk.Label()
        label.set_padding(gu.hig.SPACE_MEDIUM, gu.hig.SPACE_MEDIUM)
        ev.add(label)
        label.set_markup(_("<b>IMPORTANT:</b> Your front page file <b>must</b> be saved in a subdirectory below the directory named exercises. See the user manual for details."))
        dialog.set_extra_widget(ev2)
        ev2.show_all()
        return dialog
if __name__ == '__main__':
    # Manual test entry point: run the editor standalone on a sample file.
    Gtk.link_button_set_uri_hook(lambda a, b: None)
    e = Editor()
    # NOTE(review): Editor() already builds a model, but load_file asserts
    # self.m_model == None — confirm this entry point still works.
    e.load_file("learningtrees/learningtree.txt")
    Gtk.main()
| RannyeriDev/Solfege | solfege/fpeditor.py | Python | gpl-3.0 | 34,999 |
"""
prime_test(n) returns a True if n is a prime number else it returns False
"""
import unittest
def prime_test(n):
    """Return True if n is prime, using 6k±1 trial division."""
    if n <= 1:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # All remaining primes are of the form 6k ± 1; test both candidates
    # per step up to sqrt(n).
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
def prime_test2(n):
    """Return True if n is prime, via trial division up to sqrt(n)."""
    # Numbers less than or equal to 1 are not prime by definition.
    if n <= 1:
        return False
    # n is prime iff no integer in [2, sqrt(n)] divides it evenly.
    return all(n % candidate != 0 for candidate in range(2, int(n ** 0.5) + 1))
class TestSuite(unittest.TestCase):
    """Verify both primality testers against the known count of 25 primes
    in the range 2..100."""
    def test_prime_test(self):
        """
        checks all prime numbers between 2 up to 100.
        Between 2 up to 100 exists 25 prime numbers!
        """
        counter = sum(1 for i in range(2, 101) if prime_test(i))
        self.assertEqual(25, counter)
    def test_prime_test2(self):
        """
        checks all prime numbers between 2 up to 100.
        Between 2 up to 100 exists 25 prime numbers!
        """
        # BUG FIX: this test previously exercised prime_test instead of
        # prime_test2, leaving prime_test2 entirely untested.
        counter = sum(1 for i in range(2, 101) if prime_test2(i))
        self.assertEqual(25, counter)
if __name__ == "__main__":
    # Run TestSuite via unittest's command-line runner.
    unittest.main()
| marcosfede/algorithms | maths/prime_test.py | Python | gpl-3.0 | 1,595 |
# Exercise 31 ("Making Decisions") from Learn Python the Hard Way.
# Python 2 interactive script: branches on the user's typed choices.
print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")
if door == "1":
    print "There`s a giant bear here eating a chees cake. What do you do?"
    print "1. Take the cake."
    print "2. Scream at the bear."
    bear = raw_input("> ")
    if bear == "1":
        print "The bear eats your face off. Good job!"
    elif bear == "2":
        print "The bear eats your legs off. Good job!"
    else:
        # Any other answer lets the player off the hook.
        print "Well, doing %s is probably better. Bear runs away." %bear
elif door =="2":
    print "You stare into the endless abyss at Cthulhu's retina."
    print "1. Blueberries."
    print "2. Yellow jacket clothespins."
    print "3. Understanding revolvers yelling melodies."
    insanity = raw_input("> ")
    if insanity == "1" or insanity =="2":
        print "Your body survives powered by a mind of jello. Good job!"
    else:
        print "The insanity rots your eyes into a pool of muck. Good job!"
else:
    # Neither door chosen.
    print "You stumble around and fall on a knife and die. Good job!"
import os
# Print the absolute path of the directory containing this script.
print os.path.dirname(os.path.abspath(__file__))
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import time
from module.plugins.internal.Plugin import Plugin
from module.plugins.internal.utils import encode
class Captcha(Plugin):
    """
    Base anti-captcha plugin (pyload, Python 2).

    Subclasses may override recognize() to provide OCR; otherwise (or when
    OCR yields no result) the captcha is handed to pyload's captchaManager
    and this plugin waits for the user's answer.
    """
    __name__ = "Captcha"
    __type__ = "captcha"
    __version__ = "0.47"
    __status__ = "stable"
    __description__ = """Base anti-captcha plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
    def __init__(self, plugin): #@TODO: Pass pyfile instead plugin, so store plugin's html in its associated pyfile as data
        # Share the owning plugin's pyload core and keep a back-reference.
        self._init(plugin.pyload)
        self.plugin = plugin
        self.task = None #: captchaManager task
        self.init()
    def _log(self, level, plugintype, pluginname, messages):
        # Prefix every message with this plugin's name and delegate to the
        # owning plugin's logger.
        messages = (self.__name__,) + messages
        return self.plugin._log(level, plugintype, self.plugin.__name__, messages)
    def recognize(self, image):
        """
        Extend to build your custom anti-captcha ocr
        """
        pass
    def decrypt(self, url, get={}, post={}, ref=False, cookies=True, decode=False, req=None,
                input_type='jpg', output_type='textual', ocr=True, timeout=120):
        """Download the captcha image from *url* and decrypt it.

        NOTE(review): the mutable default arguments (get/post dicts) are
        kept for interface compatibility; callers must not mutate them.
        """
        img = self.load(url, get=get, post=post, ref=ref, cookies=cookies, decode=decode, req=req or self.plugin.req)
        return self.decrypt_image(img, input_type, output_type, ocr, timeout)
    def decrypt_image(self, data, input_type='jpg', output_type='textual', ocr=False, timeout=120):
        """
        Loads a captcha and decrypts it with ocr, plugin, user input

        :param data: image raw data
        :param get: get part for request
        :param post: post part for request
        :param cookies: True if cookies should be enabled
        :param input_type: Type of the Image
        :param output_type: 'textual' if text is written on the captcha\
        or 'positional' for captcha where the user have to click\
        on a specific region on the captcha
        :param ocr: if True, ocr is not used
        :return: result of decrypting
        """
        result = ""
        # Short time-based suffix to make the temp file name unique.
        time_ref = ("%.2f" % time.time())[-6:].replace(".", "")
        with open(os.path.join("tmp", "captcha_image_%s_%s.%s" % (self.plugin.__name__, time_ref, input_type)), "wb") as tmp_img:
            tmp_img.write(encode(data))
            if ocr:
                if isinstance(ocr, basestring):
                    # A named OCR plugin was requested: load and run it.
                    OCR = self.pyload.pluginManager.loadClass("captcha", ocr) #: Rename `captcha` to `ocr` in 0.4.10
                    result = OCR(self.plugin).recognize(tmp_img.name)
                else:
                    result = self.recognize(tmp_img.name)
            if not result:
                # OCR disabled or inconclusive: ask the user via the manager.
                captchaManager = self.pyload.captchaManager
                try:
                    self.task = captchaManager.newTask(data, input_type, tmp_img.name, output_type)
                    captchaManager.handleCaptcha(self.task)
                    self.task.setWaiting(max(timeout, 50)) #@TODO: Move to `CaptchaManager` in 0.4.10
                    while self.task.isWaiting():
                        self.plugin.check_status()
                        time.sleep(1)
                finally:
                    captchaManager.removeTask(self.task)
                if self.task.error:
                    self.fail(self.task.error)
                elif not self.task.result:
                    self.plugin.retry_captcha(msg=_("No captcha result obtained in appropriate time"))
                result = self.task.result
        if not self.pyload.debug:
            # Clean up the temporary image unless running in debug mode.
            try:
                os.remove(tmp_img.name)
            except OSError, e:
                self.log_warning(_("Error removing `%s`") % tmp_img.name, e)
        # self.log_info(_("Captcha result: ") + result) #@TODO: Remove from here?
        return result
    def invalid(self):
        """Report the last captcha result as wrong to the captcha manager."""
        if not self.task:
            return
        self.log_warning(_("Invalid captcha"))
        self.task.invalid()
    def correct(self):
        """Report the last captcha result as correct to the captcha manager."""
        if not self.task:
            return
        self.log_info(_("Correct captcha"))
        self.task.correct()
| fzimmermann89/pyload | module/plugins/internal/Captcha.py | Python | gpl-3.0 | 4,078 |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from PyQt4 import QtCore
from PyQt4 import QtGui
from widget import Button, Label
class ToolsWidget(QtGui.QWidget):
    """Widget containing the tool selection buttons (pen, pipette, fill,
    move, select) and the cursor coordinate display.

    Exactly one tool button is checked at a time; clicking a button sets
    project.tool and emits project.toolChangedSign.
    """
    def __init__(self, project):
        QtGui.QWidget.__init__(self)
        self.project = project
        ### coordinates ###
        self.coords = Label("Cursor coordinates")
        self.coords.setText("x\ny")
        ### tools buttons ###
        self.penB = Button("pen (1)", "icons/tool_pen.png", self.penClicked, True)
        self.penB.setChecked(True)
        self.project.toolSetPenSign.connect(self.penClicked)
        self.pipetteB = Button("pipette (2)", "icons/tool_pipette.png", self.pipetteClicked, True)
        self.fillB = Button("fill (3)", "icons/tool_fill.png", self.fillClicked, True)
        self.moveB = Button("move (4)", "icons/tool_move.png", self.moveClicked, True)
        self.selectB = Button("select (5)", "icons/tool_select.png", self.selectClicked, True)
        ### Layout ###
        layout = QtGui.QVBoxLayout()
        layout.setSpacing(0)
        layout.addWidget(self.coords)
        layout.addWidget(self.penB)
        layout.addWidget(self.pipetteB)
        layout.addWidget(self.fillB)
        layout.addWidget(self.moveB)
        layout.addWidget(self.selectB)
        layout.addStretch()
        layout.setContentsMargins(6, 0, 6, 0)
        self.setLayout(layout)
    def _select_tool(self, tool, active_button):
        """Set the project's current tool, make *active_button* the only
        checked button, and notify listeners via toolChangedSign.

        Replaces five copy-pasted blocks of setChecked calls (one per
        tool) with a single helper; behavior is unchanged."""
        self.project.tool = tool
        for button in (self.penB, self.pipetteB, self.fillB,
                       self.moveB, self.selectB):
            button.setChecked(button is active_button)
        self.project.toolChangedSign.emit()
    def penClicked(self):
        self._select_tool("pen", self.penB)
    def pipetteClicked(self):
        self._select_tool("pipette", self.pipetteB)
    def fillClicked(self):
        self._select_tool("fill", self.fillB)
    def moveClicked(self):
        self._select_tool("move", self.moveB)
    def selectClicked(self):
        self._select_tool("select", self.selectB)
| z-uo/pixeditor | dock_tools.py | Python | gpl-3.0 | 2,923 |
import xml.etree.ElementTree as ElementTree
import os.path
import sys
#
# is there a xmp sidecar file?
#
def get_xmp_filename(filename):
    """Return the path of the XMP sidecar file for *filename*, or False.

    Candidates are tried in order: the full filename plus .xmp/.XMP, then
    the filename with its last extension stripped plus .xmp/.XMP (some
    tools name sidecars after the original file without .jpg/.jpeg etc.).
    """
    stem = '.'.join(filename.split('.')[:-1])
    candidates = (
        filename + ".xmp",
        filename + ".XMP",
        stem + ".xmp",
        stem + ".XMP",
    )
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return False
# Build path facets from filename
class enhance_xmp(object):
    """
    ETL enhancer: if an XMP sidecar file exists next to the document,
    merge its author, headline, location and tag metadata into `data`.
    """
    def process(self, parameters=None, data=None):
        """
        Read the XMP sidecar (if any) for parameters['filename'] and add
        the extracted fields to `data` (modified in place).

        Uses parameters['verbose'] for progress output. Each field is
        parsed independently so one malformed element does not prevent
        the others from being extracted.

        :return: (parameters, data)
        """
        if parameters is None:
            parameters = {}
        if data is None:
            data = {}
        verbose = False
        if 'verbose' in parameters:
            if parameters['verbose']:
                verbose = True
        filename = parameters['filename']
        #
        # is there a xmp sidecar file?
        #
        xmpfilename = get_xmp_filename(filename)
        if not xmpfilename:
            if verbose:
                print("No xmp sidecar file")
        #
        # read meta data of the xmp sidecar file (= xml + rdf)
        #
        # Cleanup vs. original: removed a duplicated `creator = False`
        # initialisation and an unused `tags = []` local; the per-field
        # variables are only read inside the try blocks that assign them.
        if xmpfilename:
            if verbose:
                print("Reading xmp sidecar file {}".format(xmpfilename))
            try:
                # Parse the xmp file with utf 8 encoding
                parser = ElementTree.XMLParser(encoding="utf-8")
                et = ElementTree.parse(xmpfilename, parser)
                root = et.getroot()
                # get author
                try:
                    creator = root.findtext(
                        ".//{http://purl.org/dc/elements/1.1/}creator")
                    if creator:
                        data['author_ss'] = creator
                except BaseException as e:
                    sys.stderr.write("Exception while parsing creator from xmp {} {}".format(
                        xmpfilename, e.args[0]))
                # get headline
                try:
                    headline = root.findtext(
                        ".//{http://ns.adobe.com/photoshop/1.0/}Headline")
                    if headline:
                        data['title_txt'] = headline
                except BaseException as e:
                    sys.stderr.write("Exception while parsing headline from xmp {} {}".format(
                        xmpfilename, e.args[0]))
                # get location
                try:
                    location = root.findtext(
                        ".//{http://iptc.org/std/Iptc4xmpCore/1.0/xmlns/}Location")
                    if location:
                        if 'locations_ss' in data:
                            data['locations_ss'].append(location)
                        else:
                            data['locations_ss'] = [location]
                except BaseException as e:
                    sys.stderr.write("Exception while parsing location from xmp {} {}".format(
                        xmpfilename, e.args[0]))
                # get tags (named "subject")
                try:
                    for tag in root.findall(".//{http://purl.org/dc/elements/1.1/}subject/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Bag/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}li"):
                        try:
                            if 'tag_ss' in data:
                                data['tag_ss'].append(tag.text)
                            else:
                                data['tag_ss'] = [tag.text]
                        except BaseException as e:
                            sys.stderr.write("Exception while parsing a tag from xmp {} {}".format(
                                xmpfilename, e.args[0]))
                except BaseException as e:
                    sys.stderr.write("Exception while parsing tags from xmp {} {}".format(
                        xmpfilename, e.args[0]))
            except BaseException as e:
                sys.stderr.write("Exception while parsing xmp {} {}".format(
                    xmpfilename, e.args[0]))
        return parameters, data
| opensemanticsearch/open-semantic-etl | src/opensemanticetl/enhance_xmp.py | Python | gpl-3.0 | 4,568 |
"""
The mean of three integers A, B and C is (A + B + C)/3. The median of three integers is the one that would be in the
middle if they are sorted in non-decreasing order. Given two integers A and B, return the minimum possible integer C
such that the mean and the median of A, B and C are equal.
Input
Each test case is given in a single line that contains two integers A and B (1 ≤ A ≤ B ≤ 109). The last test case is
followed by a line containing two zeros.
Output
For each test case output one line containing the minimum possible integer C such that the mean and the median of A, B
and C are equal.
"""
# Read "A B" pairs until the terminating "0 0" line; for each valid pair
# print the minimum integer C with mean(A, B, C) == median(A, B, C).
#
# With A <= B, choosing C = 2*A - B makes the median A and the mean
# (A + B + (2*A - B)) / 3 == A; any smaller C would pull the mean below
# the median, so this C is minimal.
#
# Cleanup vs. original: replaced the `A = int` type-object sentinels (which
# only worked because `int != 0` is truthy on the first loop pass) with an
# explicit read-then-break loop, and renamed the variables to match the
# problem statement.
while True:
    a, b = map(int, input().split())
    if a == 0 and b == 0:
        # Sentinel line terminating the input.
        break
    if 1 <= a <= b <= 10 ** 9:
        print(2 * a - b)
| deyvedvm/cederj | urionlinejudge/python/1379.py | Python | gpl-3.0 | 781 |
""" py.test test of ExecutorDispatcher
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=protected-access
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.ExecutorDispatcher import (
ExecutorState,
ExecutorQueues,
)
# Shared module-level fixtures: an ExecutorState holding one executor
# (id 1, type "type1", 2 slots) and an empty ExecutorQueues instance.
# NOTE(review): both are mutated by the tests below, so the assertions
# are order-dependent.
execState = ExecutorState()
execState.addExecutor(1, "type1", 2)
eQ = ExecutorQueues()
def test_ExecutorState():
    """test of ExecutorState"""
    # Executor 1 starts with 2 free slots (see the module fixture above).
    assert execState.freeSlots(1) == 2
    # Adding the same task twice does not consume a second slot.
    assert execState.addTask(1, "t1") == 1
    assert execState.addTask(1, "t1") == 1
    assert execState.addTask(1, "t2") == 2
    assert execState.freeSlots(1) == 0
    assert execState.full(1)
    assert execState.removeTask("t1") == 1
    assert execState.freeSlots(1) == 1
    assert execState.getFreeExecutors("type1") == {1: 1}
    assert execState.getTasksForExecutor(1) == {"t2"}
    assert execState.removeExecutor(1)
    assert execState._internals()
def test_execQueues():
    """test of ExecutorQueues"""
    # Fill two queues ("type0", "type1") with three tasks each; pushTask
    # returns the queue length after insertion.
    for y in range(2):
        for i in range(3):
            assert eQ.pushTask("type%s" % y, "t%s%s" % (y, i)) == i + 1
    # NOTE(review): this assert is always true (non-empty string literal)
    # and verifies nothing.
    assert "DONE IN"
    res_internals = eQ._internals()
    assert res_internals["queues"] == {"type0": ["t00", "t01", "t02"], "type1": ["t10", "t11", "t12"]}
    assert set(res_internals["lastUse"].keys()) == {"type0", "type1"}
    assert res_internals["taskInQueue"] == {
        "t00": "type0",
        "t01": "type0",
        "t02": "type0",
        "t10": "type1",
        "t11": "type1",
        "t12": "type1",
    }
    # Pushing an already-queued task does not grow the queue.
    assert eQ.pushTask("type0", "t01") == 3
    assert eQ.getState()
    assert eQ.popTask("type0")[0] == "t00"
    # ahead=True re-inserts the task at the front of the queue.
    assert eQ.pushTask("type0", "t00", ahead=True) == 3
    assert eQ.popTask("type0")[0] == "t00"
    assert eQ.deleteTask("t01")
    res_internals = eQ._internals()
    assert res_internals["queues"] == {"type0": ["t02"], "type1": ["t10", "t11", "t12"]}
    assert set(res_internals["lastUse"].keys()) == {"type0", "type1"}
    assert res_internals["taskInQueue"] == {
        "t02": "type0",
        "t10": "type1",
        "t11": "type1",
        "t12": "type1",
    }
    assert eQ.getState()
    assert eQ.deleteTask("t02")
    res_internals = eQ._internals()
    assert res_internals["queues"] == {"type0": [], "type1": ["t10", "t11", "t12"]}
    assert set(res_internals["lastUse"].keys()) == {"type0", "type1"}
    assert res_internals["taskInQueue"] == {
        "t10": "type1",
        "t11": "type1",
        "t12": "type1",
    }
    assert eQ.getState()
    # Draining type1 returns tasks in FIFO order.
    for i in range(3):
        assert eQ.popTask("type1")[0] == "t1%s" % i
    res_internals = eQ._internals()
    assert res_internals["queues"] == {"type0": [], "type1": []}
    assert set(res_internals["lastUse"].keys()) == {"type0", "type1"}
    assert res_internals["taskInQueue"] == {}
    assert eQ.pushTask("type0", "t00") == 1
    assert eQ.popTask("type0") == ("t00", "type0")
    res_internals = eQ._internals()
    assert res_internals["queues"] == {"type0": [], "type1": []}
    assert set(res_internals["lastUse"].keys()) == {"type0", "type1"}
    assert res_internals["taskInQueue"] == {}
    assert eQ.pushTask("type0", "t00") == 1
    assert eQ.deleteTask("t00")
    res_internals = eQ._internals()
    assert res_internals["queues"] == {"type0": [], "type1": []}
    assert set(res_internals["lastUse"].keys()) == {"type0", "type1"}
    assert res_internals["taskInQueue"] == {}
    # Deleting a task that is no longer queued reports failure.
    assert not eQ.deleteTask("t00")
| ic-hep/DIRAC | src/DIRAC/Core/Utilities/test/Test_ExecutorDispatcher.py | Python | gpl-3.0 | 3,478 |
# This file is part of PARPG.
#
# PARPG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PARPG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PARPG. If not, see <http://www.gnu.org/licenses/>.
"""
Provides the core interface to the dialogue subsystem used to process player
L{Dialogues<Dialogue>} with NPCs.
"""
import logging
from parpg.common.utils import dedent_chomp
if (__debug__):
from collections import Sequence, MutableMapping
from parpg.dialogue import Dialogue
logger = logging.getLogger('dialogueprocessor')
class DialogueProcessor(object):
"""
Primary interface to the dialogue subsystem used to initiate and process a
L{Dialogue} with an NPC.
To begin a dialogue with an NPC a L{DialogueProcessor} must first be
instantiated with the dialogue data to process and a dictionary of Python
objects defining the game state for testing of response conditionals. The
L{initiateDialogue} must be called to initialized the L{DialogueProcessor},
and once it is initialized processing of
L{DialogueSections<DialogueSection>} and
L{DialogueResponses<DialogueResponse>} can be initiated via the
L{continueDialogue} and L{reply} class methods.
The state of dialogue processing is stored via the
L{dialogue_section_stack} class attribute, which stores a list of
L{DialogueSections<DialogueSection>} that have been or are currently being
processed. Each time L{reply} is called with a L{DialogueResponse} its
next_section_id attribute is used to select a new L{DialogueSection} from
the L{dialogue}. The selected L{DialogueSection} is then pushed
onto the end of the L{dialogue_section_stack}, ready to be processed via
L{continueDialogue}. The exception to this rule occurs when L{reply} is
called with a L{DialogueResponse} whose next_section_id attribute is "end"
or "back". "end" terminates the dialogue as described below, while "back"
removes the last L{DialogueSection} on the L{dialogue_section_stack}
effectively going back to the previous section of dialogue.
The L{DialogueProcessor} terminates dialogue processing once L{reply} is
called with a L{DialogueResponse} whose next_section_id == 'end'.
Processing can also be manually terminated by calling the L{endDialogue}
class method.
@note: See the dialogue_demo.py script for a complete example of how the
L{DialogueProcessor} can be used.
@ivar dialogue: dialogue data currently being processed.
@type dialogue: L{Dialogue}
@ivar dialogue_section_stack: sections of dialogue that have been or are
currently being processed.
@type dialogue_section_stack: list of L{DialogueSections<DialogueSection>}
@ivar game_state: objects defining the game state that should be made
available for testing L{DialogueResponse} conditionals.
@type game_state: dict of Python objects
@ivar in_dialogue: whether a dialogue has been initiated.
@type in_dialogue: Bool
Usage:
>>> game_state = {'pc': player_character, 'quest': quest_engine}
>>> dialogue_processor = DialogueProcessor(dialogue, game_state)
>>> dialogue_processor.initiateDialogue()
>>> while dialogue_processor.in_dialogue:
... valid_responses = dialogue_processor.continueDialogue()
... response = choose_response(valid_responses)
... dialogue_processor.reply(response)
"""
_logger = logging.getLogger('dialogueengine.DialogueProcessor')
def dialogue():
def fget(self):
return self._dialogue
def fset(self, dialogue):
assert isinstance(dialogue, Dialogue), \
'{0} does not implement Dialogue interface'.format(dialogue)
self._dialogue = dialogue
return locals()
dialogue = property(**dialogue())
def dialogue_section_stack():
def fget(self):
return self._dialogue_section_stack
def fset(self, new_value):
assert isinstance(new_value, Sequence) and not \
isinstance(new_value, basestring), \
'dialogue_section_stack must be a Sequence, not {0}'\
.format(new_value)
self._dialogue_section_stack = new_value
return locals()
dialogue_section_stack = property(**dialogue_section_stack())
def game_state():
def fget(self):
return self._game_state
def fset(self, new_value):
assert isinstance(new_value, MutableMapping),\
'game_state must be a MutableMapping, not {0}'\
.format(new_value)
self._game_state = new_value
return locals()
game_state = property(**game_state())
def in_dialogue():
def fget(self):
return self._in_dialogue
def fset(self, value):
assert isinstance(value, bool), '{0} is not a bool'.format(value)
self._in_dialogue = value
return locals()
in_dialogue = property(**in_dialogue())
def __init__(self, dialogue, game_state):
"""
Initialize a new L{DialogueProcessor} instance.
@param dialogue: dialogue data to process.
@type dialogue: L{Dialogue}
@param game_state: objects defining the game state that should be made
available for testing L{DialogueResponse} conditions.
@type game_state: dict of objects
"""
self._dialogue_section_stack = []
self._dialogue = dialogue
self._game_state = game_state
self._in_dialogue = False
def getDialogueGreeting(self):
"""
Evaluate the L{RootDialogueSections<RootDialogueSection>} conditions
and return the valid L{DialogueSection} which should be displayed
first.
@return: Valid root dialogue section.
@rtype: L{DialogueSection}
@raise: RuntimeError - evaluation of a DialogueGreeting condition fails
by raising an exception (e.g. due to a syntax error).
"""
dialogue = self.dialogue
dialogue_greeting = None
for greeting in dialogue.greetings:
try:
condition_met = eval(greeting.condition, self.game_state)
except Exception as exception:
error_message = dedent_chomp('''
exception raised in DialogueGreeting {id} condition:
{exception}
''').format(id=greeting.id, exception=exception)
self._logger.error(error_message)
if (condition_met):
dialogue_greeting = greeting
if (dialogue_greeting is None):
dialogue_greeting = dialogue.default_greeting
return dialogue_greeting
def initiateDialogue(self):
"""
Prepare the L{DialogueProcessor} to process the L{Dialogue} by pushing
the starting L{DialogueSection} onto the L{dialogue_section_stack}.
@raise RuntimeError: Unable to determine the root L{DialogueSection}
defined by the L{Dialogue}.
"""
if (self.in_dialogue):
self.endDialogue()
dialogue_greeting = self.getDialogueGreeting()
self.dialogue_section_stack.append(dialogue_greeting)
self.in_dialogue = True
self._logger.info('initiated dialogue {0}'.format(self.dialogue))
def continueDialogue(self):
"""
Process the L{DialogueSection} at the top of the
L{dialogue_section_stack}, run any L{DialogueActions<DialogueActions>}
it contains and return a list of valid
L{DialogueResponses<DialogueResponses> after evaluating any response
conditionals.
@returns: valid responses.
@rtype: list of L{DialogueResponses<DialogueResponse>}
@raise RuntimeError: Any preconditions are not met.
@precondition: dialogue has been initiated via L{initiateDialogue}.
"""
if (not self.in_dialogue):
error_message = dedent_chomp('''
dialogue has not be initiated via initiateDialogue yet
''')
raise RuntimeError(error_message)
current_dialogue_section = self.getCurrentDialogueSection()
self.runDialogueActions(current_dialogue_section)
valid_responses = self.getValidResponses(current_dialogue_section)
return valid_responses
def getCurrentDialogueSection(self):
"""
Return the L{DialogueSection} at the top of the
L{dialogue_section_stack}.
@returns: section of dialogue currently being processed.
@rtype: L{DialogueSection}
@raise RuntimeError: Any preconditions are not met.
@precondition: dialogue has been initiated via L{initiateDialogue} and
L{dialogue_section_stack} contains at least one L{DialogueSection}.
"""
if (not self.in_dialogue):
error_message = dedent_chomp('''
getCurrentDialogueSection called but the dialogue has not been
initiated yet
''')
raise RuntimeError(error_message)
try:
current_dialogue_section = self.dialogue_section_stack[-1]
except IndexError:
error_message = dedent_chomp('''
getCurrentDialogueSection called but no DialogueSections are in
the stack
''')
raise RuntimeError(error_message)
return current_dialogue_section
    def runDialogueActions(self, dialogue_node):
        """
        Execute all L{DialogueActions<DialogueAction>} contained by a
        L{DialogueSection} or L{DialogueResponse}.
        @param dialogue_node: section of dialogue or response containing the
            L{DialogueActions<DialogueAction>} to execute.
        @type dialogue_node: L{DialogueNode}
        """
        self._logger.info('processing commands for {0}'.format(dialogue_node))
        for command in dialogue_node.actions:
            try:
                # Each action is a callable taking the shared game state.
                command(self.game_state)
            # Deliberately broad: one failing scripted action should not
            # abort the rest of the dialogue; the failure is only logged.
            except (Exception,) as error:
                self._logger.error('failed to execute DialogueAction {0}: {1}'
                                   .format(command.keyword, error))
                # TODO Technomage 2010-11-18: Undo previous actions when an
                #     action fails to execute.
            else:
                self._logger.debug('ran {0} with arguments {1}'
                                   .format(getattr(type(command), '__name__'),
                                           command.arguments))
    def getValidResponses(self, dialogue_section):
        """
        Evaluate all L{DialogueResponse} conditions for a L{DialogueSection}
        and return a list of valid responses.
        @param dialogue_section: section of dialogue containing the
            L{DialogueResponses<DialogueResponse>} to process.
        @type dialogue_section: L{DialogueSection}
        @return: responses whose conditions were met.
        @rtype: list of L{DialogueResponses<DialogueResponse>}
        """
        valid_responses = []
        for dialogue_response in dialogue_section.responses:
            condition = dialogue_response.condition
            try:
                # A response with no condition is always valid.
                # SECURITY NOTE(review): conditions are evaluated with
                # eval() against the game state, so dialogue files are
                # effectively trusted code — never load dialogues from
                # untrusted sources.
                condition_met = condition is None or \
                                eval(condition, self.game_state)
            # Broad on purpose: a malformed condition only disables that
            # response; it is logged rather than aborting the dialogue.
            except (Exception,) as exception:
                error_message = dedent_chomp('''
                    evaluation of condition {condition} for {response} failed
                    with error: {exception}
                ''').format(condition=dialogue_response.condition,
                            response=dialogue_response, exception=exception)
                self._logger.error(error_message)
            else:
                self._logger.debug(
                    'condition "{0}" for {1} evaluated to {2}'
                    .format(dialogue_response.condition, dialogue_response,
                            condition_met)
                )
                if (condition_met):
                    valid_responses.append(dialogue_response)
        return valid_responses
    def reply(self, dialogue_response):
        """
        Reply with a L{DialogueResponse}, execute the
        L{DialogueActions<DialogueAction>} it contains and push the next
        L{DialogueSection} onto the L{dialogue_section_stack}.

        The response's C{next_section_id} may be a section identifier or
        one of two pseudo-actions: C{'back'} (pop to the previous
        section) or C{'end'} (terminate the dialogue).
        @param dialogue_response: response to reply with.
        @type dialogue_response: L{DialogueReponse}
        @raise RuntimeError: Any precondition is not met.
        @precondition: L{initiateDialogue} must be called before this method
            is used.
        """
        if (not self.in_dialogue):
            error_message = dedent_chomp('''
                reply cannot be called until the dialogue has been initiated
                via initiateDialogue
            ''')
            raise RuntimeError(error_message)
        self._logger.info('replied with {0}'.format(dialogue_response))
        # FIXME: Technomage 2010-12-11: What happens if runDialogueActions
        #     raises an error?
        self.runDialogueActions(dialogue_response)
        next_section_id = dialogue_response.next_section_id
        if (next_section_id == 'back'):
            # 'back' needs at least two stacked sections: the current one
            # and the one to return to.
            if (len(self.dialogue_section_stack) == 1):
                error_message = dedent_chomp('''
                    attempted to run goto: back action but stack does not
                    contain a previous DialogueSection
                ''')
                raise RuntimeError(error_message)
            else:
                try:
                    self.dialogue_section_stack.pop()
                except (IndexError,):
                    error_message = dedent_chomp('''
                        attempted to run goto: back action but the stack was
                        empty
                    ''')
                    raise RuntimeError(error_message)
                else:
                    self._logger.debug(
                        'ran goto: back action, restored last DialogueSection'
                    )
        elif (next_section_id == 'end'):
            self.endDialogue()
            self._logger.debug('ran goto: end action, ended dialogue')
        else:
            # Plain section identifier: look it up and push it.
            try:
                next_dialogue_section = \
                    self.dialogue.sections[next_section_id]
            except KeyError:
                error_message = dedent_chomp('''
                    {0} is not a recognized goto: action or DialogueSection
                    identifier
                ''').format(next_section_id)
                raise RuntimeError(error_message)
            else:
                self.dialogue_section_stack.append(next_dialogue_section)
def endDialogue(self):
"""
End the current dialogue and clean up any resources in use by the
L{DialogueProcessor}.
"""
self.dialogue_section_stack = []
self.in_dialogue = False
| parpg/parpg | parpg/dialogueprocessor.py | Python | gpl-3.0 | 15,770 |
class SnakeGame:

    def __init__(self, width, height, food):
        """
        Initialize your data structure here.
        @param width - screen width
        @param height - screen height
        @param food - A list of food positions
        E.g food = [[1,1], [1,0]] means the first food is positioned at
        [1,1], the second is at [1,0].
        :type width: int
        :type height: int
        :type food: List[List[int]]
        """
        # Local stdlib import: this file has no top-level imports, so the
        # original reference to ``collections.deque`` raised NameError.
        from collections import deque
        self.w, self.h = width, height
        self.food = food
        self.idx = 0  # index of the next food item to be eaten
        # Deque of (row, col) cells; head at index 0, tail at index -1.
        self.snake = deque([(0, 0)])
        # Mirror of the deque's cells for O(1) self-collision checks.
        self.snake_set = set([(0, 0)])
        self.game_state = True  # False once the game is over

    def move(self, direction):
        """
        Moves the snake.
        @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
        @return The game's score after the move. Return -1 if game over.
        Game over when snake crosses the screen boundary or bites its body.
        :type direction: str
        :rtype: int
        """
        if not self.game_state:
            return -1
        directions = {'U': (-1, 0), 'L': (0, -1), 'R': (0, 1), 'D': (1, 0)}
        head = self.snake[0]
        n_x = head[0] + directions[direction][0]
        n_y = head[1] + directions[direction][1]
        # Wall collision: rows are bounded by height, columns by width.
        if not (0 <= n_x < self.h and 0 <= n_y < self.w):
            self.game_state = False
            return -1
        # Body collision; the current tail cell is exempt because it
        # vacates its cell on a non-eating step.
        if (n_x, n_y) != self.snake[-1] and (n_x, n_y) in self.snake_set:
            self.game_state = False
            return -1
        if self.idx < len(self.food) and self.food[self.idx] == [n_x, n_y]:
            # Ate food: the snake grows, so the tail stays put.
            self.idx += 1
        else:
            self.snake_set.remove(self.snake[-1])
            self.snake.pop()
        self.snake.appendleft((n_x, n_y))
        self.snake_set.add((n_x, n_y))
        # Score equals the number of food items eaten so far.
        return self.idx
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction) | YiqunPeng/Leetcode-pyq | solutions/353DesignSnakeGame.py | Python | gpl-3.0 | 2,009 |
from Start_up import*
from Player import Player, HealthBar
from Bullet import Bullet
from Enemy import Enemy
from Stopper import Stopper
from Particle import Particle
from Stars import Star, create_stars
from Package import EnemyDrop, HealthPack
from Notes import NoteController
class Game:
    """The main play state: owns every on-screen entity (player, enemies,
    bullets, particles, pick-ups, stars, HUD) and advances the game one
    frame per run() call, returning the id of the next state."""
    def __init__(self, play, health_bar):
        # State ids (game_state, pause_state, ...) and globals such as
        # width, height, main_theme, font come from Start_up's
        # wildcard import — TODO confirm against Start_up.
        self.own_state = game_state
        self.next_state = self.own_state
        self.player = play
        self.enemy_id_tracker = 0  # unique id source for spawned enemies
        # Invisible walls just off each side of the screen.
        self.left_stop = Stopper((-30, 0), True, False)
        self.right_stop = Stopper((width, 0), False, True)
        self.bullet_list = []
        self.enemy_list = []
        self.kill_list = []       # enemies flagged for removal this frame
        self.particle_list = []
        self.package_list = []    # pick-ups (enemy drops / health packs)
        self.to_update = []       # rebuilt every frame in run()
        self.to_display = []
        self.to_text = []
        self.star_list = create_stars("game")
        # Translucent strip along the top of the screen for HUD text.
        self.info_bar = pygame.Surface((width, 30))
        self.info_bar.fill(main_theme)
        self.info_bar.set_alpha(100)
        self.health_bar = health_bar
        self.note_controller = NoteController((width - 10, 40))
    def reset(self, play):
        # Restore a pristine game for a new run; mirrors __init__ except
        # the stoppers and info bar, which are reusable.
        self.own_state = game_state
        self.next_state = self.own_state
        self.player = play
        self.health_bar = HealthBar(self.player)
        self.enemy_id_tracker = 0
        self.bullet_list = []
        self.enemy_list = []
        self.kill_list = []
        self.particle_list = []
        self.package_list = []
        self.to_update = []
        self.to_display = []
        self.to_text = []
        self.star_list = create_stars("game")
        self.note_controller = NoteController((width - 10, 40))
    def update_all(self):
        # a check for all update elements, providing the
        # relevant information for the objects update
        for x in range(0, len(self.to_update)):
            if isinstance(self.to_update[x], Particle):
                self.to_update[x].update()
            elif isinstance(self.to_update[x], Star):
                self.to_update[x].update()
            elif isinstance(self.to_update[x], Enemy):
                self.to_update[x].update(self.bullet_list)
            elif isinstance(self.to_update[x], Player):
                self.to_update[x].update(self.package_list, self.note_controller, self.bullet_list, self.health_bar)
            elif isinstance(self.to_update[x], Bullet):
                self.to_update[x].update()
            elif isinstance(self.to_update[x], EnemyDrop):
                self.to_update[x].update()
            elif isinstance(self.to_update[x], HealthPack):
                self.to_update[x].update()
            elif isinstance(self.to_update[x], NoteController):
                self.to_update[x].update()
            elif isinstance(self.to_update[x], Stopper):
                self.to_update[x].update(self.bullet_list, self.enemy_list, self.player, self.note_controller)
            elif isinstance(self.to_update[x], HealthBar):
                self.to_update[x].update()
    def display_all(self):
        # fill screen with black and display all game information
        main_s.fill((20, 20, 20))
        for x in range(0, len(self.to_display)):
            if isinstance(self.to_display[x], Player):
                # the player is only drawn while alive
                if self.to_display[x].alive:
                    self.to_display[x].display()
            else:
                self.to_display[x].display()
        # HUD strip and static help text are drawn on top of everything
        main_s.blit(self.info_bar, (0, 0))
        main_s.blit(font.render("ESC TO PAUSE", True, (255, 255, 255)), (width - 115, 5))
    def text_all(self):
        # display all text needed at the top of the screen
        # entries are laid out left to right, offset by the accumulated
        # character count of the previous entries
        total_length = 0
        for x in range(0, len(self.to_text)):
            main_s.blit(font.render(str(self.to_text[x]), True, (255, 255, 255)), (5 + (15 * total_length), 5))
            total_length += len(self.to_text[x])
    def hit_particles(self, rect_hit, colour):
        # create particles with random speeds, directions and sizes
        # NOTE(review): range(...) + range(...) concatenation only works
        # on Python 2, where range() returns a list.
        numbers_z = range(-10, 10)
        numbers_nz = range(-10, -1) + range(1, 10)
        for x in range(0, settings.loaded_enemy_particles):
            x_temp = random.choice(numbers_z)
            y_temp = random.choice(numbers_z)
            dy = y_temp
            dx = x_temp
            # make sure that dx and dy are not both 0 so that there
            # are no particles static on the screen
            if x_temp == 0 and y_temp != 0:
                dy = y_temp
                dx = x_temp
            if y_temp == 0 and x_temp != 0:
                dy = y_temp
                dx = x_temp
            if x_temp == y_temp == 0:
                dy = random.choice(numbers_nz)
                dx = random.choice(numbers_nz)
            particle = Particle(random.randint(1, 3), (dx, dy), rect_hit, colour)
            self.particle_list.append(particle)
    def remove_particles(self):
        # remove particles that are no longer colliding with the screen
        # removed from the end first so that the list does not effect
        # later elements to remove
        for x in range(0, len(self.particle_list)):
            try:
                if not pygame.sprite.collide_rect(screen_rect, self.particle_list[len(self.particle_list) - x - 1]):
                    del self.particle_list[len(self.particle_list) - x - 1]
            except:
                # break in case [len(p_list) - x - 1] is out of range
                break
    def remove_stars(self):
        # remove stars that are no longer colliding with the screen
        # removed from the end first so that the list does not effect
        # later elements to remove
        for x in range(0, len(self.star_list)):
            try:
                if not pygame.sprite.collide_rect(screen_rect, self.star_list[len(self.star_list) - x - 1]):
                    del self.star_list[len(self.star_list) - x - 1]
            except:
                # break in case [len(p_list) - x - 1] is out of range
                break
    def remove_packages(self):
        # currently unused (call site in run() is commented out)
        print(len(self.package_list))
        for i in range(0, len(self.package_list)):
            try:
                if not pygame.sprite.collide_rect(screen_rect, self.package_list[len(self.package_list) - i - 1]):
                    del self.package_list[len(self.package_list) - i - 1]
            except IndexError:
                # break in case [len(p_list) - x - 1] is out of range
                break
    def check_enemy_alive(self):
        # add enemies to a removal list if they are dead
        for x in range(0, len(self.enemy_list)):
            if self.enemy_list[x].dead:
                self.kill_list.append(self.enemy_list[x])
    def kill_enemies(self):
        # remove enemies from enemy list that are on the kill list
        # create a package and give the player the coins dropped
        # create particles originating from the now dead enemy
        # create a notification for the user saying they have found money
        for x in range(0, len(self.kill_list)):
            for y in range(0, len(self.enemy_list)):
                try:
                    # match kill-list entries to live enemies by id
                    if self.kill_list[len(self.kill_list) - x - 1].id == self.enemy_list[len(self.enemy_list) - y - 1].id:
                        del self.kill_list[len(self.kill_list) - x - 1]
                        self.note_controller.add_note("+ " + str(self.enemy_list[len(self.enemy_list) - y - 1].money * self.player.money_collection) + " coins", main_theme)
                        self.player.get_coins(self.enemy_list[len(self.enemy_list) - y - 1].money)
                        self.hit_particles(self.enemy_list[len(self.enemy_list) - y - 1].rect, white)
                        self.random_enemy_drop(self.enemy_list[len(self.enemy_list) - y - 1].dx,
                                               self.enemy_list[len(self.enemy_list) - y - 1].rect.center)
                        del self.enemy_list[len(self.enemy_list) - y - 1]
                        break
                except:
                    break
    def random_event_enemy(self):
        # create an enemy if the random variable is 1
        if random.randint(1, settings.loaded_enemy_chance) == 1:
            enemy = Enemy(self.enemy_id_tracker)
            self.enemy_list.append(enemy)
            self.enemy_id_tracker += 1
    def random_event_star(self):
        if random.randint(1, star_chance) == 1:
            # create a star starting at the right and set to move to the left
            s = Star(width + 10,  # x pos (start a little off screen)
                     random.randint(0, height),  # y pos
                     random.randint(1, 2),  # dx
                     0)  # dy
            self.star_list.append(s)
    def random_enemy_drop(self, speed, pos):
        # random chance that package will be created
        if random.randint(1, package_chance) == 1:
            e = EnemyDrop(speed, pos)
            self.package_list.append(e)
    def random_health_pack(self):
        pos = (width + 10, random.randint(20, height - 20))
        # random chance that package will be created
        # (50x rarer than enemy drops)
        if random.randint(1, package_chance * 50) == 1:
            h = HealthPack(-random.randint(1, 2), pos)
            self.package_list.append(h)
    def input(self, event_list):
        # player input
        key = pygame.key.get_pressed()
        if key[pygame.K_UP]:
            self.player.move(-1)
        if key[pygame.K_DOWN]:
            self.player.move(1)
        if key[pygame.K_SPACE]:
            self.bullet_list = self.player.shoot(self.bullet_list)
        for event in event_list:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self.next_state = pause_state
    def run(self, event_list):
        # run all game functions
        self.input(event_list)
        self.random_event_enemy()
        self.random_event_star()
        self.random_health_pack()
        self.check_enemy_alive()
        self.kill_enemies()
        self.remove_particles()
        self.remove_stars()
        #self.remove_packages()
        # reload all lists
        self.to_display = self.package_list + self.star_list + self.bullet_list + self.enemy_list + \
                          self.particle_list + [self.player, self.note_controller, self.health_bar]
        self.to_update = [self.player, self.note_controller, self.left_stop, self.right_stop] + \
                         self.package_list + self.star_list + self.bullet_list + self.enemy_list + self.particle_list
        self.to_text = [str(self.player.money) + " COINS"]
        if not self.player.alive:
            # death burst, then hand control to the game-over state
            self.hit_particles(self.player.rect, main_theme)
            self.next_state = game_over_state
        if self.player.hit:
            self.health_bar.update_health()
            self.hit_particles(self.player.rect, main_theme)
        if self.player.health_update:
            self.health_bar.update_health()
            self.player.health_update = False
        self.update_all()
        self.display_all()
        self.text_all()
        #print(len(self.package_list))
        # by default return the games own state value
        # otherwise this will be changed in the user input
        return self.next_state
| YJoe/SpaceShips | Desktop/Python/space_scroll/Game.py | Python | gpl-3.0 | 11,279 |
from trifle.server.views.frontend import frontend
from trifle.server.views.api import api
from trifle.server.views.monitor import monitor
from trifle.server.views.configure import configure
| gloaec/trifle | src/trifle/server/views/__init__.py | Python | gpl-3.0 | 200 |
class Rule:
    '''
    Base class shared by every block rule: wraps a text block in
    start/end markers for the rule's type.
    '''
    def action(self, block, handler):
        '''
        Emit the rule's opening marker, the raw block text and the
        closing marker through the handler, then report the block as
        fully handled.
        '''
        marker = self.type
        handler.start(marker)
        handler.feed(block)
        handler.end(marker)
        return True
class HeadingRule(Rule):
    '''
    A heading is a single line of at most 70 characters that does not
    end with a colon.
    '''
    type = 'heading'
    def condition(self, block):
        '''
        Return True if the block qualifies as a heading.

        Uses str.endswith instead of the original ``block[-1] == ':'``
        so that an empty block no longer raises IndexError.
        '''
        return '\n' not in block and len(block) <= 70 \
            and not block.endswith(':')
class TitleRule(HeadingRule):
    '''
    The title is the first heading in the document; every later block
    is rejected, even if it would otherwise qualify as a heading.
    '''
    type = 'title'
    first = True
    def condition(self, block):
        '''
        Return True only for the first block seen that also satisfies
        the heading criteria. The first block consumes the ``first``
        flag whether or not it qualifies.

        Fixes the original method name, which was misspelled
        ``condintion`` — the override was therefore never called and
        TitleRule behaved exactly like HeadingRule.
        '''
        if not self.first:
            return False
        self.first = False
        return HeadingRule.condition(self, block)
class ListItemRule(Rule):
    '''
    A list item: a block whose first character is a hyphen. The hyphen
    is stripped before the item text is emitted.
    '''
    type = 'listitem'
    def condition(self, block):
        '''Return True when the block is marked as a list item.'''
        return block[0] == '-'
    def action(self, block, handler):
        '''Emit the item text without its leading hyphen or surrounding
        whitespace, and report the block as handled.'''
        item_text = block[1:].strip()
        handler.start(self.type)
        handler.feed(item_text)
        handler.end(self.type)
        return True
class ListRule(ListItemRule):
    '''
    A list begins between a non-list-item block and a list item, and
    ends between a list item and the following non-item block.
    '''
    type = 'list'
    inside = False
    def condition(self, block):
        '''Applies to every block: the rule only tracks boundaries.'''
        return True
    def action(self, block, handler):
        '''
        Open the list when the first list item appears and close it at
        the first non-item afterwards. Always returns False so other
        rules still get to handle the block itself.

        Fixes two defects in the original: ``self.inside`` was never
        set to True after opening (so the list was re-opened on every
        item and never closed), and the close call was written
        ``handler.end(self,type)`` — a comma typo that passed the rule
        object and the builtin ``type`` instead of ``self.type``.
        '''
        if not self.inside and ListItemRule.condition(self, block):
            handler.start(self.type)
            self.inside = True
        elif self.inside and not ListItemRule.condition(self, block):
            handler.end(self.type)
            self.inside = False
        return False
class ParagraphRule(Rule):
    '''
    The fallback rule: any block not claimed by a more specific rule
    is treated as a paragraph.
    '''
    type = 'paragraph'
    def condition(self, block):
        '''Every block qualifies as a paragraph.'''
        return True
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-01 12:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.10.6): adds three
    # per-technology repository counters, three technology-name fields
    # and an overall repository counter to the linkedin_user model.
    # Do not edit the operations by hand once this has been applied.

    dependencies = [
        ('website', '0030_github_user'),
    ]

    operations = [
        migrations.AddField(
            model_name='linkedin_user',
            name='number_all_repos',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='number_repos1',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='number_repos2',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='number_repos3',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='technology1',
            field=models.CharField(default='', max_length=50),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='technology2',
            field=models.CharField(default='', max_length=50),
        ),
        migrations.AddField(
            model_name='linkedin_user',
            name='technology3',
            field=models.CharField(default='', max_length=50),
        ),
    ]
| hiezust/teask | website/migrations/0031_auto_20170601_1502.py | Python | gpl-3.0 | 1,475 |
import json
import os
import re
import shutil
import xmltodict
import zipfile
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import MultipleObjectsReturned
from django.db.models import Q
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext_lazy as _
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication, Authentication
from tastypie.authorization import ReadOnlyAuthorization, Authorization
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
from api.serializers import CourseJSONSerializer
from oppia.models import Tracker, Course, CourseCategory
from oppia.signals import course_downloaded
STR_COURSE_NOT_FOUND = _(u"Course not found")
def get_course_from_shortname(resource, bundle, lookup):
    """Return the filtered course list matching *lookup* as a shortname.

    Raises the resource's DoesNotExist when nothing matches and
    MultipleObjectsReturned when the shortname is ambiguous.
    """
    matches = resource.apply_filters(bundle.request,
                                     {'shortname': lookup})
    if len(matches) == 0:
        raise resource._meta.object_class.DoesNotExist(
            "Couldn't find an course with shortname '%s'." % (lookup))
    if len(matches) > 1:
        raise MultipleObjectsReturned(
            "More than one course with shortname '%s'." % (lookup))
    return matches
class CourseResource(ModelResource):
    """Read-only tastypie resource for courses, with extra endpoints to
    download the course package and the user's activity (tracker) XML.
    Detail lookups accept either a numeric pk or a course shortname."""

    class Meta:
        queryset = Course.objects.all()
        resource_name = 'course'
        allowed_methods = ['get']
        fields = ['id',
                  'title',
                  'version',
                  'shortname',
                  'priority',
                  'is_draft',
                  'description',
                  'author',
                  'username',
                  'organisation']
        authentication = ApiKeyAuthentication()
        authorization = ReadOnlyAuthorization()
        serializer = CourseJSONSerializer()
        always_return_data = True
        include_resource_uri = True

    def obj_get(self, bundle, **kwargs):
        """
        Overriden get method to perform a direct lookup if we are searching
        by shortname instead of pk
        """
        lookup = kwargs[self._meta.detail_uri_name]
        # Any letter in the lookup means it cannot be a pk.
        if re.search('[a-zA-Z]', lookup):
            object_list = get_course_from_shortname(self, bundle, lookup)
            bundle.obj = object_list[0]
            self.authorized_read_detail(object_list, bundle)
            return bundle.obj
        else:
            return super().obj_get(bundle, **kwargs)

    def get_object_list(self, request):
        # Staff see every non-archived course; other users additionally
        # see drafts only when they own them.
        if request.user.is_staff:
            return Course.objects.filter(is_archived=False) \
                .order_by('-priority', 'title')
        else:
            return Course.objects.filter(is_archived=False) \
                .filter(
                    Q(is_draft=False) |
                    (Q(is_draft=True) & Q(user=request.user))) \
                .order_by('-priority', 'title')

    def prepend_urls(self):
        # Extra nested endpoints: .../course/<pk>/download/ and
        # .../course/<pk>/activity/
        return [
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$"
                % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('download_course'), name="api_download_course"),
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/activity%s$"
                % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('download_activity'),
                name="api_download_activity"),
        ]

    def get_course(self, request, **kwargs):
        """Resolve the course for a nested endpoint, honouring draft
        visibility (owner or explicit course permission), and accepting
        either a pk or a shortname. Raises Http404 when not found."""
        self.is_authenticated(request)
        self.throttle_check(request)
        pk = kwargs.pop('pk', None)
        try:
            if request.user.is_staff:
                course = self._meta.queryset.get(pk=pk, is_archived=False)
            else:
                course = self._meta.queryset \
                    .filter(
                        Q(is_draft=False) |
                        (Q(is_draft=True) & Q(user=request.user)) |
                        (Q(is_draft=True)
                         & Q(coursepermissions__user=request.user))) \
                    .distinct().get(pk=pk, is_archived=False)
        except Course.DoesNotExist:
            raise Http404(STR_COURSE_NOT_FOUND)
        except ValueError:
            # pk was not numeric — retry the lookup as a shortname.
            try:
                if request.user.is_staff:
                    course = self._meta.queryset.get(shortname=pk,
                                                     is_archived=False)
                else:
                    course = self._meta.queryset \
                        .filter(
                            Q(is_draft=False) |
                            (Q(is_draft=True) & Q(user=request.user)) |
                            (Q(is_draft=True)
                             & Q(coursepermissions__user=request.user))) \
                        .distinct().get(shortname=pk, is_archived=False)
            except Course.DoesNotExist:
                raise Http404(STR_COURSE_NOT_FOUND)
        return course

    def download_course(self, request, **kwargs):
        """Serve the course zip; when the user has completed trackers,
        a per-user copy is made with a tracker.xml embedded."""
        course = self.get_course(request, **kwargs)
        file_to_download = course.getAbsPath()
        has_completed_trackers = Tracker.has_completed_trackers(course,
                                                                request.user)
        try:
            if has_completed_trackers:
                # NOTE(review): the per-user temp copy does not appear to
                # be deleted after serving — confirm cleanup elsewhere.
                file_to_download = os.path.join(
                    settings.COURSE_UPLOAD_DIR,
                    "temp",
                    str(request.user.id) + "-" + course.filename)
                shutil.copy2(course.getAbsPath(), file_to_download)
                course_zip = zipfile.ZipFile(file_to_download, 'a')
                if has_completed_trackers:
                    course_zip.writestr(course.shortname + "/tracker.xml",
                                        Tracker.to_xml_string(course,
                                                              request.user))
                course_zip.close()
            binary_file = open(file_to_download, 'rb')
            response = HttpResponse(binary_file.read(),
                                    content_type='application/zip')
            binary_file.close()
            response['Content-Length'] = os.path.getsize(file_to_download)
            response['Content-Disposition'] = \
                'attachment; filename="%s"' % (course.filename)
        except IOError:
            raise Http404(STR_COURSE_NOT_FOUND)
        # Let listeners (e.g. stats) know the course was downloaded.
        course_downloaded.send(sender=self, course=course, request=request)
        return response

    def download_activity(self, request, **kwargs):
        """Serve the user's tracker activity for the course as XML."""
        course = self.get_course(request, **kwargs)
        return HttpResponse(Tracker.to_xml_string(course,
                                                  request.user),
                            content_type='text/xml')

    def dehydrate(self, bundle):
        bundle.data['url'] = bundle.request.build_absolute_uri(
            bundle.data['resource_uri'] + 'download/')
        # make sure title is shown as json object (not string representation \
        # of one)
        bundle.data['title'] = json.loads(bundle.data['title'])
        try:
            bundle.data['description'] = json.loads(bundle.data['description'])
        except json.JSONDecodeError:
            # description may be plain text; leave it untouched
            pass
        course = Course.objects.get(pk=bundle.obj.pk)
        if course and course.user:
            bundle.data['author'] = course.user.first_name \
                + " " \
                + course.user.last_name
            bundle.data['username'] = course.user.username
            bundle.data['organisation'] = course.user.userprofile.organisation
        return bundle
class CourseCategoryResource(ModelResource):
    """Read-only resource mapping courses to their categories; each
    entry embeds the full related course representation."""
    course = fields.ToOneField('api.resource.course.CourseResource',
                               'course',
                               full=True)

    class Meta:
        queryset = CourseCategory.objects.all()
        allowed_methods = ['get']
        # kept as 'coursetag' for backwards compatibility with older
        # clients — TODO confirm before renaming
        resource_name = 'coursetag'
        fields = ['id', 'course', 'category']
        include_resource_uri = False
        authentication = ApiKeyAuthentication()
        authorization = ReadOnlyAuthorization()
        always_return_data = True
class CourseStructureResource(ModelResource):
    """Unauthenticated, read-only resource exposing a course's module.xml
    structure (parsed to JSON) for published, non-archived courses."""

    class Meta:
        queryset = Course.objects.filter(is_draft=False, is_archived=False)
        resource_name = 'coursestructure'
        allowed_methods = ['get']
        fields = ['shortname',
                  'id',
                  'structure']
        # deliberately open: no API key required for course structure
        authentication = Authentication()
        authorization = Authorization()
        serializer = CourseJSONSerializer()
        always_return_data = True
        include_resource_uri = True

    def obj_get(self, bundle, **kwargs):
        """
        Overriden get method to perform a direct lookup if we are searching
        by shortname instead of pk
        """
        lookup = kwargs[self._meta.detail_uri_name]
        if re.search('[a-zA-Z]', lookup):
            object_list = get_course_from_shortname(self, bundle, lookup)
            return_obj = object_list[0]
        else:
            return_obj = super().obj_get(bundle, **kwargs)

        # check the module.xml is on disk
        path = os.path.join(settings.MEDIA_ROOT,
                            'courses',
                            return_obj.shortname,
                            'module.xml')
        if not os.path.isfile(path):
            # treat a missing file the same as a missing course
            raise self._meta.object_class.DoesNotExist()
        return return_obj

    def dehydrate(self, bundle):
        # Parse module.xml and attach it as a JSON string under
        # 'structure'.
        path = os.path.join(settings.MEDIA_ROOT,
                            'courses',
                            bundle.obj.shortname,
                            'module.xml')
        with open(path) as fd:
            doc = xmltodict.parse(fd.read())
        bundle.data['structure'] = json.dumps(doc)
        return bundle
| DigitalCampus/django-oppia | api/resources/course.py | Python | gpl-3.0 | 9,945 |
"""Session class factory methods."""
from __future__ import unicode_literals
import logging
from cachecontrol import CacheControlAdapter
from cachecontrol.cache import DictCache
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def add_cache_control(session, cache_control_config):
    """Mount a CacheControl adapter (in-memory dict cache) on *session*
    for both HTTP and HTTPS, and expose its controller on the session."""
    cfg = cache_control_config
    adapter = CacheControlAdapter(
        DictCache(),
        cache_etags=cfg.get('cache_etags', True),
        serializer=cfg.get('serializer', None),
        heuristic=cfg.get('heuristic', None),
    )
    for scheme in ('http://', 'https://'):
        session.mount(scheme, adapter)
    session.cache_controller = adapter.controller
| pymedusa/Medusa | medusa/session/factory.py | Python | gpl-3.0 | 748 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
from subprocess import check_call, CalledProcessError
import shlex
import sys
import logging
log = logging.getLogger( __name__ )
def novo_sort(bam_filename, output_filename):
    """Sort *bam_filename* by read position with novosort, writing the
    result to *output_filename*.

    The command is built directly as an argv list rather than formatted
    into a string and re-tokenised with shlex, so filenames containing
    whitespace are passed through intact. A non-zero exit status is
    reported on stderr rather than raised.
    """
    cmdline = ["novosort", "-c", "8", "-m", "8G", "-s", "-f",
               bam_filename, "-o", output_filename]
    try:
        check_call(cmdline)
    except CalledProcessError:
        print("Error running the nova-sort", file=sys.stderr)
def newSplit(value):
    """Tokenize *value* on whitespace, keeping double-quoted groups
    together (quotes are retained in the token); '#' is not treated as
    a comment character."""
    lexer = shlex.shlex(value)
    lexer.quotes = '"'
    lexer.whitespace_split = True
    lexer.commenters = ''
    return [token for token in lexer]
def main():
    """Parse the command line and run the novosort wrapper."""
    arg_parser = argparse.ArgumentParser(
        description="Re-sorting aligned files by read position")
    arg_parser.add_argument('output_filename')
    arg_parser.add_argument('--bam_filename')
    parsed = arg_parser.parse_args()
    novo_sort(parsed.bam_filename, parsed.output_filename)


if __name__ == "__main__":
    main()
import sched, time
s = sched.scheduler(time.time, time.sleep)
def print_time():
    """Print a marker string followed by the current epoch time."""
    now = time.time()
    print("From print_time", now)
def print_some_times():
    # Demonstrates the module-level scheduler: queues print_time at
    # +5 s and +10 s, then blocks in s.run() until both have fired
    # (about 10 seconds of wall-clock time).
    print (time.time())
    s.enter(5, 1, print_time, ())
    s.enter(10, 1, print_time, ())
    s.run()
    print (time.time())

# NOTE(review): runs at import time — importing this module blocks for
# ~10 seconds as a side effect.
print_some_times()
import bpy

# Motion-tracking camera preset ("Canon 1D", per the preset filename):
# applies fixed intrinsics to the tracking camera of the movie clip
# currently being edited.
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 27.9       # sensor width in mm (units set below)
camera.units = 'MILLIMETERS'
camera.focal_length = 24.0       # focal length in mm
camera.pixel_aspect = 1          # square pixels
# No lens distortion assumed: all polynomial coefficients zeroed.
camera.k1 = 0.0
camera.k2 = 0.0
camera.k3 = 0.0
import os
from os import path
from datetime import datetime
import getpass
import re
import time
from fabric.context_managers import cd, hide, settings
from fabric.operations import require, prompt, get, run, sudo, local
from fabric.state import env
from fabric.contrib import files
from fabric import utils
def _setup_paths(project_settings):
    """Populate fabric's env from *project_settings*, then fill in any
    missing path/config entries with defaults derived from the project
    name and server home. Existing env values always win (setdefault)."""
    # first merge in variables from project_settings - but ignore __doc__ etc
    user_settings = [x for x in vars(project_settings).keys() if not x.startswith('__')]
    for setting in user_settings:
        env[setting] = vars(project_settings)[setting]

    # allow for project_settings having set up some of these differently
    env.setdefault('verbose', False)
    env.setdefault('use_sudo', True)
    env.setdefault('cvs_rsh', 'CVS_RSH="ssh"')
    env.setdefault('default_branch', {'production': 'master', 'staging': 'master'})
    env.setdefault('server_project_home',
                   path.join(env.server_home, env.project_name))
    # TODO: change dev -> current
    env.setdefault('vcs_root_dir', path.join(env.server_project_home, 'dev'))
    env.setdefault('prev_root', path.join(env.server_project_home, 'previous'))
    env.setdefault('next_dir', path.join(env.server_project_home, 'next'))
    env.setdefault('dump_dir', path.join(env.server_project_home, 'dbdumps'))
    env.setdefault('deploy_dir', path.join(env.vcs_root_dir, 'deploy'))
    env.setdefault('settings', '%(project_name)s.settings' % env)

    if env.project_type == "django":
        # Django layout: relative paths first, then the absolute ones
        # derived from them.
        env.setdefault('relative_django_dir', env.project_name)
        env.setdefault('relative_django_settings_dir', env['relative_django_dir'])
        env.setdefault('relative_ve_dir', path.join(env['relative_django_dir'], '.ve'))
        # now create the absolute paths of everything else
        env.setdefault('django_dir',
                       path.join(env['vcs_root_dir'], env['relative_django_dir']))
        env.setdefault('django_settings_dir',
                       path.join(env['vcs_root_dir'], env['relative_django_settings_dir']))
        env.setdefault('ve_dir',
                       path.join(env['vcs_root_dir'], env['relative_ve_dir']))
        env.setdefault('manage_py', path.join(env['django_dir'], 'manage.py'))

    # local_tasks_bin is the local copy of tasks.py
    # this should be the copy from where ever fab.py is being run from ...
    if 'DEPLOYDIR' in os.environ:
        env.setdefault('local_tasks_bin',
                       path.join(os.environ['DEPLOYDIR'], 'tasks.py'))
    else:
        env.setdefault('local_tasks_bin',
                       path.join(path.dirname(__file__), 'tasks.py'))

    # valid environments - used for require statements in fablib
    env.valid_envs = env.host_list.keys()
def _linux_type():
    """Return (and cache in env) the flavour of the target server,
    'redhat' or 'debian', detected from distro marker files; aborts the
    fabric run if neither marker is present."""
    if 'linux_type' in env:
        return env.linux_type
    # Check redhat first to preserve the original precedence.
    markers = (('/etc/redhat-release', 'redhat'),
               ('/etc/debian_version', 'debian'))
    for marker_file, flavour in markers:
        if files.exists(marker_file):
            env.linux_type = flavour
            return env.linux_type
    # TODO: should we print a warning here?
    utils.abort("could not determine linux type of server we're deploying to")
def _get_python():
    """Return (and cache in env) the python interpreter to use on the
    server, preferring /usr/bin/python2.6 when it exists."""
    if 'python_bin' in env:
        return env.python_bin
    preferred = path.join('/', 'usr', 'bin', 'python2.6')
    fallback = path.join('/', 'usr', 'bin', 'python')
    env.python_bin = preferred if files.exists(preferred) else fallback
    return env.python_bin
def _get_tasks_bin():
    """Return (and cache in env) the path of tasks.py inside the
    server-side deploy directory."""
    if 'tasks_bin' not in env:
        env.tasks_bin = path.join(env.deploy_dir, 'tasks.py')
    return env.tasks_bin
def _tasks(tasks_args, verbose=False):
    """Run the server-side tasks.py with *tasks_args*, appending -v when
    either env.verbose or the caller requests verbosity."""
    command = _get_tasks_bin()
    if env.verbose or verbose:
        command += ' -v'
    sudo_or_run('%s %s' % (command, tasks_args))
def _get_svn_user_and_pass():
    """Ensure env.svnuser and env.svnpass are set, interactively
    prompting for whichever is missing (password input is not echoed)."""
    if 'svnuser' not in env or len(env.svnuser) == 0:
        # prompt user for username
        prompt('Enter SVN username:', 'svnuser')
    if 'svnpass' not in env or len(env.svnpass) == 0:
        # prompt user for password
        env.svnpass = getpass.getpass('Enter SVN password:')
def verbose(verbose=True):
    """Set verbose output"""
    # NOTE(review): the parameter shadows the function name, and fabric
    # passes CLI task arguments as strings — `verbose:False` on the
    # command line would store the truthy string 'False'. Confirm
    # intended usage before relying on falsy values here.
    env.verbose = verbose
def deploy_clean(revision=None):
    """ delete the entire install and do a clean install """
    # Hard guard: a clean deploy wipes files and database, so it is
    # refused outright on production.
    if env.environment == 'production':
        utils.abort('do not delete the production environment!!!')
    require('server_project_home', provided_by=env.valid_envs)
    # TODO: dump before cleaning database?
    # warn_only so a webserver that is already stopped does not abort
    # the whole clean.
    with settings(warn_only=True):
        webserver_cmd('stop')
    clean_db()
    clean_files()
    deploy(revision)
def clean_files():
    # Irreversibly delete the whole project tree on the server.
    sudo_or_run('rm -rf %s' % env.server_project_home)
def _create_dir_if_not_exists(path):
    # Create *path* (and parents) on the server if missing.
    # NOTE(review): the parameter shadows the module-level `os.path`
    # import inside this function; renaming it would change the keyword
    # interface, so it is only flagged here.
    if not files.exists(path):
        sudo_or_run('mkdir -p %s' % path)
def deploy(revision=None, keep=None):
    """ update remote host environment (virtualenv, deploy, update)

    It takes two arguments:

    * revision is the VCS revision ID to checkout (if not specified then
      the latest will be checked out)
    * keep is the number of old versions to keep around for rollback (default
      5)"""
    require('server_project_home', provided_by=env.valid_envs)
    check_for_local_changes()

    _create_dir_if_not_exists(env.server_project_home)

    # TODO: check if our live site is in <sitename>/dev/ - if so
    # move it to <sitename>/current/ and make a link called dev/ to
    # the current/ directory
    # TODO: if dev/ is found to be a link, ask the user if the apache config
    # has been updated to point at current/ - and if so then delete dev/
    # _migrate_from_dev_to_current()
    # Prepare the new version in next/ while the live site keeps serving.
    create_copy_for_next()
    checkout_or_update(in_next=True, revision=revision)
    # remove any old pyc files - essential if the .py file has been removed
    if env.project_type == "django":
        rm_pyc_files(path.join(env.next_dir, env.relative_django_dir))
    # create the deploy virtualenv if we use it
    create_deploy_virtualenv(in_next=True)

    # we only have to disable this site after creating the rollback copy
    # (do this so that apache carries on serving other sites on this server
    # and the maintenance page for this vhost)
    downtime_start = datetime.now()
    link_webserver_conf(maintenance=True)
    with settings(warn_only=True):
        webserver_cmd('reload')
    # Swap next/ into place; the old live copy becomes a rollback version.
    next_to_current_to_rollback()

    # Use tasks.py deploy:env to actually do the deployment, including
    # creating the virtualenv if it thinks it necessary, ignoring
    # env.use_virtualenv as tasks.py knows nothing about it.
    _tasks('deploy:' + env.environment)

    # bring this vhost back in, reload the webserver and touch the WSGI
    # handler (which reloads the wsgi app)
    link_webserver_conf()
    webserver_cmd('reload')
    downtime_end = datetime.now()
    touch_wsgi()

    delete_old_rollback_versions(keep)
    if env.environment == 'production':
        setup_db_dumps()
    _report_downtime(downtime_start, downtime_end)
def _report_downtime(downtime_start, downtime_end):
    """Tell the user how long the site was offline during the deploy."""
    seconds_offline = (downtime_end - downtime_start).total_seconds()
    utils.puts("Downtime lasted for %.1f seconds" % seconds_offline)
    utils.puts("(Downtime started at %s and finished at %s)" %
               (downtime_start, downtime_end))
def set_up_celery_daemon():
    """Install and restart the celerybeat/celeryd init scripts.

    Copies each init script and its config out of the checkout into
    /etc/init.d and /etc/default, suffixed with the project name so several
    projects can coexist on one server."""
    require('vcs_root_dir', 'project_name', provided_by=env)
    for command in ('celerybeat', 'celeryd'):
        # installed as e.g. "celeryd_myproject"
        command_project = command + '_' + env.project_name
        celery_run_script_location = path.join(env['vcs_root_dir'],
                                               'celery', 'init', command)
        celery_run_script = path.join('/etc', 'init.d', command_project)
        celery_configuration_location = path.join(env['vcs_root_dir'],
                                                  'celery', 'config', command)
        celery_configuration_destination = path.join('/etc', 'default',
                                                     command_project)
        sudo_or_run(" ".join(['cp', celery_run_script_location,
                              celery_run_script]))
        # init scripts must be executable to be run by the init system
        sudo_or_run(" ".join(['chmod', '+x', celery_run_script]))
        sudo_or_run(" ".join(['cp', celery_configuration_location,
                              celery_configuration_destination]))
        sudo_or_run('/etc/init.d/%s restart' % command_project)
def clean_old_celery():
    """As the scripts have moved location you might need to get rid of old
    versions of celery."""
    require('vcs_root_dir', provided_by=env)
    for command in ('celerybeat', 'celeryd'):
        # old layout installed the scripts without the project-name suffix
        celery_run_script = path.join('/etc', 'init.d', command)
        if files.exists(celery_run_script):
            # stop the daemon before removing its init script
            sudo_or_run('/etc/init.d/%s stop' % command)
            sudo_or_run('rm %s' % celery_run_script)
        celery_configuration_destination = path.join('/etc', 'default', command)
        if files.exists(celery_configuration_destination):
            sudo_or_run('rm %s' % celery_configuration_destination)
def create_copy_for_next():
    """Copy the current version to "next" so that we can do stuff like
    the VCS update and virtualenv update without taking the site offline"""
    # TODO: check if next directory already exists
    # if it does maybe there was an aborted deploy, or maybe someone else is
    # deploying. Either way, stop and ask the user what to do.
    if files.exists(env.next_dir):
        utils.warn('The "next" directory already exists. Maybe a previous '
                   'deploy failed, or maybe another deploy is in progress.')
        continue_anyway = prompt('Would you like to continue anyway '
                                 '(and delete the current next dir)? [no/yes]',
                                 default='no', validate='^no|yes$')
        if continue_anyway.lower() != 'yes':
            utils.abort("Aborting deploy - try again when you're certain what to do.")
        sudo_or_run('rm -rf %s' % env.next_dir)
    # if this is the initial deploy, the vcs_root_dir won't exist yet. In that
    # case, don't create it (otherwise the checkout code will get confused).
    if files.exists(env.vcs_root_dir):
        # cp -a - amongst other things this preserves links and timestamps
        # so the compare that bootstrap.py does to see if the virtualenv
        # needs an update should still work.
        sudo_or_run('cp -a %s %s' % (env.vcs_root_dir, env.next_dir))
def next_to_current_to_rollback():
    """Move the current version to the previous directory (so we can roll back
    to it, move the next version to the current version (so it will be used) and
    do a db dump in the rollback directory."""
    # create directory for it
    # if this is the initial deploy, the vcs_root_dir won't exist yet. In that
    # case just skip the rollback version.
    if files.exists(env.vcs_root_dir):
        _create_dir_if_not_exists(env.prev_root)
        # rollback copies are named by timestamp so they sort chronologically
        prev_dir = path.join(env.prev_root, time.strftime("%Y-%m-%d_%H-%M-%S"))
        sudo_or_run('mv %s %s' % (env.vcs_root_dir, prev_dir))
        _dump_db_in_previous_directory(prev_dir)
    # promote the prepared "next" copy to be the live checkout
    sudo_or_run('mv %s %s' % (env.next_dir, env.vcs_root_dir))
def create_copy_for_rollback():
    """Copy the current live version into a new timestamped rollback
    directory (so we can roll back to it later) and do a db dump alongside
    it.

    Used by rollback() to save the state it is about to overwrite."""
    # create the timestamped directory to copy into
    prev_dir = path.join(env.prev_root, time.strftime("%Y-%m-%d_%H-%M-%S"))
    _create_dir_if_not_exists(prev_dir)
    # cp -a: vcs_root_dir is a directory, so plain cp would refuse to copy
    # it; -a also preserves links and timestamps, matching the copy done in
    # create_copy_for_next()
    sudo_or_run('cp -a %s %s' % (env.vcs_root_dir, prev_dir))
    _dump_db_in_previous_directory(prev_dir)
def _dump_db_in_previous_directory(prev_dir):
    """Dump the database into prev_dir so a rollback can restore data too.

    Only does anything for django projects whose checkout contains a
    local_settings.py (which holds the database credentials)."""
    require('django_settings_dir', provided_by=env.valid_envs)
    if env.project_type != 'django':
        return
    local_settings = path.join(env.django_settings_dir, 'local_settings.py')
    if not files.exists(local_settings):
        return
    with cd(prev_dir):
        # the dump is best-effort - carry on even if it fails for some
        # other reason
        with settings(warn_only=True):
            _tasks('dump_db')
def delete_old_rollback_versions(keep=None):
    """Delete old rollback directories, keeping the last "keep" (default 5)"."""
    require('prev_root', provided_by=env.valid_envs)
    # ls -1 prints one entry per line, so the output can be split safely
    prev_versions = run('ls -1 ' + env.prev_root).split('\n')
    if keep is None:
        keep = env.versions_to_keep if 'versions_to_keep' in env else 5
    else:
        keep = int(keep)
    # keep == 0 means "never delete anything"
    if keep == 0:
        return
    # directory names are timestamps, so lexical order is chronological;
    # everything except the newest `keep` entries gets removed
    stale_versions = prev_versions[:-1 * int(keep)]
    for stale in stale_versions:
        sudo_or_run('rm -rf ' + path.join(env.prev_root, stale.strip()))
def list_previous():
    """List the previous versions available to rollback to."""
    # could also determine the VCS revision number
    require('prev_root', provided_by=env.valid_envs)
    # just show the timestamped directory names
    run('ls ' + env.prev_root)
def rollback(version='last', migrate=False, restore_db=False):
    """Redeploy one of the old versions.

    Arguments are 'version', 'migrate' and 'restore_db':

    * if version is 'last' (the default) then the most recent version will be
      restored. Otherwise specify by timestamp - use list_previous to get a list
      of available versions.
    * if restore_db is True, then the database will be restored as well as the
      code. The default is False.
    * if migrate is True, then fabric will attempt to work out the new and old
      migration status and run the migrations to match the database versions.
      The default is False

    Note that migrate and restore_db cannot both be True."""
    require('prev_root', 'vcs_root_dir', provided_by=env.valid_envs)
    if migrate and restore_db:
        utils.abort('rollback cannot do both migrate and restore_db')
    if migrate:
        utils.abort("rollback: haven't worked out how to do migrate yet ...")
    if version == 'last':
        # get the latest directory from prev_dir
        # list directories in env.prev_root, use last one
        # (names are timestamps, so lexical order is chronological)
        version = run('ls ' + env.prev_root).split('\n')[-1]
    # check version specified exists
    rollback_dir = path.join(env.prev_root, version)
    if not files.exists(rollback_dir):
        utils.abort("Cannot rollback to version %s, it does not exist, use list_previous to see versions available" % version)
    webserver_cmd("stop")
    # first copy this version out of the way
    # (so the state being rolled back FROM is itself recoverable)
    create_copy_for_rollback()
    if migrate:
        # run the south migrations back to the old version
        # but how to work out what the old version is??
        pass
    if restore_db:
        # feed the dump file into mysql command
        with cd(rollback_dir):
            _tasks('load_dbdump')
    # delete everything - don't want stray files left over
    sudo_or_run('rm -rf %s' % env.vcs_root_dir)
    # cp -a from rollback_dir to vcs_root_dir
    sudo_or_run('cp -a %s %s' % (rollback_dir, env.vcs_root_dir))
    webserver_cmd("start")
def local_test():
    """ run the django tests on the local machine """
    require('project_name')
    # the project checkout lives alongside the deploy directory
    project_dir = path.join("..", env.project_name)
    with cd(project_dir):
        local("python " + env.test_cmd, capture=False)
def remote_test():
    """ run the django tests remotely - staging only """
    require('django_dir', provided_by=env.valid_envs)
    # running tests clobbers data - never allow it on the live site
    if env.environment == 'production':
        utils.abort('do not run tests on the production environment')
    with cd(env.django_dir):
        test_command = _get_python() + env.test_cmd
        sudo_or_run(test_command)
def version():
    """ return the deployed VCS revision and commit comments"""
    require('server_project_home', 'repo_type', 'vcs_root_dir', 'repository',
            provided_by=env.valid_envs)
    if env.repo_type == "git":
        with cd(env.vcs_root_dir):
            # first few lines of the log show the current commit + message
            sudo_or_run('git log | head -5')
    elif env.repo_type == "svn":
        _get_svn_user_and_pass()
        with cd(env.vcs_root_dir):
            # hide('running') so the svn password is not echoed to the console
            with hide('running'):
                cmd = 'svn log --non-interactive --username %s --password %s | head -4' % (env.svnuser, env.svnpass)
                sudo_or_run(cmd)
    else:
        utils.abort('Unsupported repo type: %s' % (env.repo_type))
def _check_git_branch():
    """Work out which git branch to deploy, asking the user if the server,
    local and default branches disagree.  Sets env.revision as a side
    effect (None until a branch is chosen)."""
    env.revision = None
    with cd(env.vcs_root_dir):
        with settings(warn_only=True):
            # get branch information
            server_branch = sudo_or_run('git rev-parse --abbrev-ref HEAD')
            server_commit = sudo_or_run('git rev-parse HEAD')
            local_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
            default_branch = env.default_branch.get(env.environment, 'master')
            git_branch_r = sudo_or_run('git branch --color=never -r')
        git_branch_r = git_branch_r.split('\n')
        # strip the "origin/" prefix; ignore the symbolic HEAD entry
        branches = [b.split('/')[-1].strip() for b in git_branch_r if 'HEAD' not in b]

    # if all branches are the same, just stick to this branch
    if server_branch == local_branch == default_branch:
        env.revision = server_branch
    else:
        if server_branch == 'HEAD':
            # not on a branch - just print a warning
            print 'The server git repository is not on a branch'

        print 'Branch mismatch found:'
        print '* %s is the default branch for this server' % default_branch
        if server_branch == 'HEAD':
            # detached HEAD - show the commit hash instead of a branch name
            print '* %s is the commit checked out on the server.' % server_commit
        else:
            print '* %s is the branch currently checked out on the server' % server_branch
        print '* %s is the current branch of your local git repo' % local_branch
        print ''
        print 'Available branches are:'
        for branch in branches:
            print '* %s' % branch
        print ''
        # build a regex that only accepts one of the listed branches
        escaped_branches = [re.escape(b) for b in branches]
        validate_branch = '^' + '|'.join(escaped_branches) + '$'

        env.revision = prompt('Which branch would you like to use on the server? (or hit Ctrl-C to exit)',
                              default=default_branch, validate=validate_branch)
def check_for_local_changes():
    """ check if there are local changes on the remote server """
    require('repo_type', 'vcs_root_dir', provided_by=env.valid_envs)
    # per-VCS command that prints nothing when the working copy is clean
    status_cmd = {
        'svn': 'svn status --quiet',
        'git': 'git status --short',
        'cvs': '#not worked out yet'
    }
    if env.repo_type == 'cvs':
        print "TODO: write CVS status command"
        return
    # only check if there actually is a checkout (.svn / .git directory)
    if files.exists(path.join(env.vcs_root_dir, "." + env.repo_type)):
        with cd(env.vcs_root_dir):
            status = sudo_or_run(status_cmd[env.repo_type])
            if status:
                print 'Found local changes on %s server' % env.environment
                print status
                cont = prompt('Would you like to continue with deployment? (yes/no)',
                              default='no', validate=r'^yes|no$')
                if cont == 'no':
                    utils.abort('Aborting deployment')
        if env.repo_type == 'git':
            # for git, also reconcile server/local/default branches
            _check_git_branch()
def checkout_or_update(in_next=False, revision=None):
    """ checkout or update the project from version control.

    This command works with svn, git and cvs repositories.

    You can also specify a revision to checkout, as an argument.

    If in_next is True the "next" copy (used during deploy) is updated
    rather than the live checkout."""
    require('server_project_home', 'repo_type', 'vcs_root_dir', 'repository',
            provided_by=env.valid_envs)
    checkout_fn = {
        'cvs': _checkout_or_update_cvs,
        'svn': _checkout_or_update_svn,
        'git': _checkout_or_update_git,
    }
    if in_next:
        vcs_root_dir = env.next_dir
    else:
        vcs_root_dir = env.vcs_root_dir
    # normalise case once: previously the membership test used
    # env.repo_type.lower() but the dict lookup used the raw value, so a
    # mixed-case repo_type (e.g. 'Git') passed the check and then raised
    # a KeyError
    repo_type = env.repo_type.lower()
    if repo_type in checkout_fn:
        checkout_fn[repo_type](vcs_root_dir, revision)
    else:
        utils.abort('Unsupported VCS: %s' % repo_type)
def _checkout_or_update_svn(vcs_root_dir, revision=None):
    """Do an svn update (if a working copy exists) or checkout into
    vcs_root_dir, optionally pinned to a revision."""
    # function to ask for svnuser and svnpass
    _get_svn_user_and_pass()
    # if the .svn directory exists, do an update, otherwise do
    # a checkout
    cmd = 'svn %s --non-interactive --no-auth-cache --username %s --password %s'
    if files.exists(path.join(vcs_root_dir, ".svn")):
        cmd = cmd % ('update', env.svnuser, env.svnpass)
        if revision:
            cmd += " --revision " + revision
        with cd(vcs_root_dir):
            # hide('running') so the svn password is not echoed
            with hide('running'):
                sudo_or_run(cmd)
    else:
        # checkout needs the repository URL and target dir appended
        cmd = cmd + " %s %s"
        cmd = cmd % ('checkout', env.svnuser, env.svnpass, env.repository, vcs_root_dir)
        if revision:
            # peg revision syntax: URL@REV
            cmd += "@" + revision
        with cd(env.server_project_home):
            with hide('running'):
                sudo_or_run(cmd)
def _checkout_or_update_git(vcs_root_dir, revision=None):
    """Clone (first deploy) or fetch+checkout the requested git revision
    into vcs_root_dir, stashing and restoring any local changes."""
    # if the .git directory exists, do an update, otherwise do
    # a clone
    if files.exists(path.join(vcs_root_dir, ".git")):
        with cd(vcs_root_dir):
            # re-point origin in case env.repository has changed
            sudo_or_run('git remote rm origin')
            sudo_or_run('git remote add origin %s' % env.repository)
            # fetch now, merge later (if on branch)
            sudo_or_run('git fetch origin')

        if revision is None:
            # fall back to the branch chosen by _check_git_branch()
            revision = env.revision

        with cd(vcs_root_dir):
            # stash local changes so the checkout cannot fail on them
            stash_result = sudo_or_run('git stash')
            sudo_or_run('git checkout %s' % revision)
            # check if revision is a branch, and do a merge if it is
            with settings(warn_only=True):
                rev_is_branch = sudo_or_run('git branch -r | grep %s' % revision)
            # use old fabric style here to support Ubuntu 10.04
            if not rev_is_branch.failed:
                sudo_or_run('git merge origin/%s' % revision)
            # if we did a stash, now undo it
            if not stash_result.startswith("No local changes"):
                sudo_or_run('git stash pop')
    else:
        with cd(env.server_project_home):
            default_branch = env.default_branch.get(env.environment, 'master')
            sudo_or_run('git clone -b %s %s %s' %
                        (default_branch, env.repository, vcs_root_dir))

    if files.exists(path.join(vcs_root_dir, ".gitmodules")):
        with cd(vcs_root_dir):
            sudo_or_run('git submodule update --init')
def _checkout_or_update_cvs(vcs_root_dir, revision=None):
    """Do a cvs update (if a checkout exists) or a fresh cvs checkout into
    vcs_root_dir, optionally pinned to a revision/tag."""
    if files.exists(vcs_root_dir):
        with cd(vcs_root_dir):
            # -d creates new directories, -P prunes empty ones
            sudo_or_run('CVS_RSH="ssh" cvs update -d -P')
    else:
        if 'cvs_user' in env:
            user_spec = env.cvs_user + "@"
        else:
            user_spec = ""

        with cd(env.server_project_home):
            # CVSROOT spec: -d:method:user@host:path
            cvs_options = '-d:%s:%s%s:%s' % (env.cvs_connection_type,
                                             user_spec,
                                             env.repository,
                                             env.repo_path)
            command_options = '-d %s' % vcs_root_dir

            if revision is not None:
                command_options += ' -r ' + revision

            sudo_or_run('%s cvs %s checkout %s %s' % (env.cvs_rsh, cvs_options,
                                                      command_options,
                                                      env.cvs_project))
def sudo_or_run(command):
    """Run command on the remote host, via sudo() when env.use_sudo is
    set and plain run() otherwise; returns the command's output."""
    runner = sudo if env.use_sudo else run
    return runner(command)
def create_deploy_virtualenv(in_next=False):
    """ if using new style dye stuff, create the virtualenv to hold dye """
    require('deploy_dir', provided_by=env.valid_envs)
    if in_next:
        # TODO: use relative_deploy_dir
        bootstrap_path = path.join(env.next_dir, 'deploy', 'bootstrap.py')
    else:
        bootstrap_path = path.join(env.deploy_dir, 'bootstrap.py')
    bootstrap_cmd = '%s %s --full-rebuild --quiet' % (
        _get_python(), bootstrap_path)
    sudo_or_run(bootstrap_cmd)
def update_requirements():
    """ update external dependencies on remote host """
    # delegate to the remote tasks.py, which knows the virtualenv layout
    _tasks('update_ve')
def collect_static_files():
    """ collect static files in the 'static' directory """
    # NOTE(review): uses sudo() directly rather than sudo_or_run() like the
    # rest of this file - confirm whether that is deliberate
    sudo(_get_tasks_bin() + ' collect_static')
def clean_db(revision=None):
    """ delete the entire database """
    # `revision` is unused; kept so existing fab invocations that pass an
    # argument keep working
    # safety guard: refuse to ever run against production
    if env.environment == 'production':
        utils.abort('do not delete the production database!!!')
    _tasks("clean_db")
def get_remote_dump(filename='/tmp/db_dump.sql', local_filename='./db_dump.sql',
        rsync=True):
    """ do a remote database dump and copy it to the local filesystem """
    # future enhancement, do a mysqldump --skip-extended-insert (one insert
    # per line) and then do rsync rather than get() - less data transferred on
    # however rsync might need ssh keys etc
    require('user', 'host', provided_by=env.valid_envs)
    if rsync:
        # for_rsync=true makes the dump rsync-friendly (one insert per line)
        _tasks('dump_db:' + filename + ',for_rsync=true')
        local("rsync -vz -e 'ssh -p %s' %s@%s:%s %s" % (env.port,
            env.user, env.host, filename, local_filename))
    else:
        _tasks('dump_db:' + filename)
        get(filename, local_path=local_filename)
        # NOTE(review): only the non-rsync path removes the remote dump file;
        # confirm whether the rsync path should clean up too
        sudo_or_run('rm ' + filename)
def get_remote_dump_and_load(filename='/tmp/db_dump.sql',
        local_filename='./db_dump.sql', keep_dump=True, rsync=True):
    """ do a remote database dump, copy it to the local filesystem and then
    load it into the local database """
    get_remote_dump(filename=filename, local_filename=local_filename, rsync=rsync)
    local(env.local_tasks_bin + ' restore_db:' + local_filename)
    if not keep_dump:
        # tidy up the local copy once it has been loaded
        local('rm ' + local_filename)
def update_db(force_use_migrations=False):
    """ create and/or update the database, do migrations etc """
    # delegate to the remote tasks.py, which knows the project's db setup
    _tasks('update_db:force_use_migrations=%s' % force_use_migrations)
def setup_db_dumps():
    """ set up mysql database dumps """
    require('dump_dir', provided_by=env.valid_envs)
    # delegate to the remote tasks.py, passing where the dumps should go
    _tasks('setup_db_dumps:' + env.dump_dir)
def touch_wsgi():
    """ touch wsgi file to trigger reload """
    require('vcs_root_dir', provided_by=env.valid_envs)
    # mod_wsgi reloads the application when the handler file's mtime changes
    wsgi_handler = path.join(env.vcs_root_dir, 'wsgi', 'wsgi_handler.py')
    sudo_or_run('touch ' + wsgi_handler)
def rm_pyc_files(py_dir=None):
    """Remove all the old pyc files to prevent stale files being used"""
    require('django_dir', provided_by=env.valid_envs)
    target_dir = env.django_dir if py_dir is None else py_dir
    # warn_only: an empty find result makes xargs/rm exit non-zero
    with settings(warn_only=True):
        with cd(target_dir):
            sudo_or_run('find . -name \*.pyc | xargs rm')
def _delete_file(path):
    """Remove the given remote file, quietly doing nothing if absent."""
    if not files.exists(path):
        return
    sudo_or_run('rm %s' % path)
def _link_files(source_file, target_path):
    """Symlink target_path -> source_file unless target_path already exists."""
    if files.exists(target_path):
        return
    sudo_or_run('ln -s %s %s' % (source_file, target_path))
def link_webserver_conf(maintenance=False):
    """link the webserver conf file

    With maintenance=True the vhost is pointed at the maintenance page conf
    (if one exists in the checkout); otherwise the live conf is linked in."""
    require('vcs_root_dir', provided_by=env.valid_envs)
    # nothing to do for projects that are not served by a webserver
    if env.webserver is None:
        return
    # conf files live in the checkout: <webserver>/<environment>[-maintenance].conf
    vcs_config_stub = path.join(env.vcs_root_dir, env.webserver, env.environment)
    vcs_config_live = vcs_config_stub + '.conf'
    vcs_config_maintenance = vcs_config_stub + '-maintenance.conf'
    webserver_conf = _webserver_conf_path()
    if maintenance:
        _delete_file(webserver_conf)
        # maintenance page is optional - silently skip if not provided
        if not files.exists(vcs_config_maintenance):
            return
        _link_files(vcs_config_maintenance, webserver_conf)
    else:
        if not files.exists(vcs_config_live):
            utils.abort('No %s conf file found - expected %s' %
                        (env.webserver, vcs_config_live))
        _delete_file(webserver_conf)
        _link_files(vcs_config_live, webserver_conf)

    # debian has sites-available/sites-enabled split with links
    if _linux_type() == 'debian':
        webserver_conf_enabled = webserver_conf.replace('available', 'enabled')
        sudo_or_run('ln -s %s %s' % (webserver_conf, webserver_conf_enabled))
    webserver_configtest()
def _webserver_conf_path():
    """Return the remote path of this project's vhost conf file, based on
    the webserver in use and the linux flavour; aborts if unsupported."""
    webserver_conf_dir = {
        'apache_redhat': '/etc/httpd/conf.d',
        'apache_debian': '/etc/apache2/sites-available',
    }
    key = env.webserver + '_' + _linux_type()
    if key not in webserver_conf_dir:
        utils.abort('webserver %s is not supported (linux type %s)' %
                    (env.webserver, _linux_type()))
    conf_filename = '%s_%s.conf' % (env.project_name, env.environment)
    return path.join(webserver_conf_dir[key], conf_filename)
def webserver_configtest():
    """ test webserver configuration """
    tests = {
        'apache_redhat': '/usr/sbin/httpd -S',
        'apache_debian': '/usr/sbin/apache2ctl -S',
    }
    # nothing to test when no webserver is configured
    if not env.webserver:
        return
    key = env.webserver + '_' + _linux_type()
    if key not in tests:
        utils.abort('webserver %s is not supported (linux type %s)' %
                    (env.webserver, _linux_type()))
    sudo(tests[key])
def webserver_reload():
    """ reload webserver on remote host """
    # graceful reload - re-reads config without dropping in-flight requests
    webserver_cmd('reload')
def webserver_restart():
    """ restart webserver on remote host """
    # full stop/start - briefly interrupts service
    webserver_cmd('restart')
def webserver_cmd(cmd):
    """ run cmd against webserver init.d script """
    cmd_strings = {
        'apache_redhat': '/etc/init.d/httpd',
        'apache_debian': '/etc/init.d/apache2',
    }
    # no-op for projects without a webserver
    if not env.webserver:
        return
    key = env.webserver + '_' + _linux_type()
    if key not in cmd_strings:
        utils.abort('webserver %s is not supported' % env.webserver)
    sudo(cmd_strings[key] + ' ' + cmd)
| qris/mailer-dye | dye/fablib.py | Python | gpl-3.0 | 29,664 |
from collections import deque
class Node(tuple):
    """A die-rolling search state: (row, col, face-holding-the-five).

    Direction convention on the grid:
        ^  -- backward (row - 1)      v  -- forward (row + 1)
        <- -- left (col - 1)          -> -- right (col + 1)
    """
    LEFT, RIGHT, TOP, BOTTOM, FORWARD, BACKWARD = range(6)

    # Where the "five" face moves when the die rolls in each direction.
    # Faces not listed lie on the roll axis and stay put.
    _FWD = {FORWARD: BOTTOM, BOTTOM: BACKWARD, BACKWARD: TOP, TOP: FORWARD}
    _BWD = {FORWARD: TOP, BOTTOM: FORWARD, BACKWARD: BOTTOM, TOP: BACKWARD}
    _LFT = {LEFT: BOTTOM, BOTTOM: RIGHT, RIGHT: TOP, TOP: LEFT}
    _RGT = {LEFT: TOP, BOTTOM: LEFT, RIGHT: BOTTOM, TOP: RIGHT}

    def __new__(cls, r, c, five):
        return tuple.__new__(cls, (r, c, five))

    def __init__(self, r, c, five):
        # state is fully stored in the tuple itself
        pass

    def neighbors(self, mat, n):
        """Yield each state one roll away that stays on the n x n board
        and does not land on a blocked ('*') cell."""
        r, c, _ = self
        if r > 0 and mat[r - 1][c] != '*':
            yield self.roll_backwards()
        if c > 0 and mat[r][c - 1] != '*':
            yield self.roll_left()
        if r < n - 1 and mat[r + 1][c] != '*':
            yield self.roll_forward()
        if c < n - 1 and mat[r][c + 1] != '*':
            yield self.roll_right()

    def roll_forward(self):
        """Roll one row down (v)."""
        r, c, five = self
        return Node(r + 1, c, Node._FWD.get(five, five))

    def roll_backwards(self):
        """Roll one row up (^)."""
        r, c, five = self
        return Node(r - 1, c, Node._BWD.get(five, five))

    def roll_left(self):
        """Roll one column to the left (<-)."""
        r, c, five = self
        return Node(r, c - 1, Node._LFT.get(five, five))

    def roll_right(self):
        """Roll one column to the right (->)."""
        r, c, five = self
        return Node(r, c + 1, Node._RGT.get(five, five))
def find_ends(mat):
    """Scan the grid for 'S' (start, five facing left) and 'H' (the hole,
    five facing down); return (start, end) as soon as both are located."""
    start = end = None
    for r, row in enumerate(mat):
        for c, cell in enumerate(row):
            if cell == 'S':
                start = Node(r, c, Node.LEFT)
            elif cell == 'H':
                end = Node(r, c, Node.BOTTOM)
            if start is not None and end is not None:
                return start, end
def dfs(mat, n, start, end):
    """Iterative depth-first search over die states; True iff `end` is
    reachable from `start` by legal rolls on the grid."""
    pending = deque([start])
    seen = {start}
    while pending:
        state = pending.pop()
        if state == end:
            return True
        for nxt in state.neighbors(mat, n):
            if nxt not in seen:
                seen.add(nxt)
                pending.append(nxt)
    return False
def main():
    """Read test cases from stdin and report for each whether the die can
    reach the hole."""
    num_cases = int(input())
    for _ in range(num_cases):
        n = int(input())
        grid = [input() for _ in range(n)]
        start, end = find_ends(grid)
        print('Yes' if dfs(grid, n, start, end) else 'No')
if __name__ == "__main__":
main() | JonSteinn/Kattis-Solutions | src/Dacey the Dice/Python 3/main.py | Python | gpl-3.0 | 3,045 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Python implementation of Poincaré Embeddings.
These embeddings are better at capturing latent hierarchical information than traditional Euclidean embeddings.
The method is described in detail in `Maximilian Nickel, Douwe Kiela -
"Poincaré Embeddings for Learning Hierarchical Representations" <https://arxiv.org/abs/1705.08039>`_.
The main use-case is to automatically learn hierarchical representations of nodes from a tree-like structure,
such as a Directed Acyclic Graph (DAG), using a transitive closure of the relations. Representations of nodes in a
symmetric graph can also be learned.
This module allows training Poincaré Embeddings from a training file containing relations of graph in a
csv-like format, or from a Python iterable of relations.
Examples
--------
Initialize and train a model from a list
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
>>> model.train(epochs=50)
Initialize and train a model from a file containing one relation per line
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel, PoincareRelations
>>> from gensim.test.utils import datapath
>>> file_path = datapath('poincare_hypernyms.tsv')
>>> model = PoincareModel(PoincareRelations(file_path), negative=2)
>>> model.train(epochs=50)
"""
import csv
import logging
from numbers import Integral
import sys
import time
import numpy as np
from collections import defaultdict, Counter
from numpy import random as np_random
from scipy.stats import spearmanr
from six import string_types
from six.moves import zip, range
from gensim import utils, matutils
from gensim.models.keyedvectors import Vocab, BaseKeyedVectors
from gensim.models.utils_any2vec import _save_word2vec_format, _load_word2vec_format
from numpy import float32 as REAL
try:
from autograd import grad # Only required for optionally verifying gradients while training
from autograd import numpy as grad_np
AUTOGRAD_PRESENT = True
except ImportError:
AUTOGRAD_PRESENT = False
logger = logging.getLogger(__name__)
class PoincareModel(utils.SaveLoad):
"""Train, use and evaluate Poincare Embeddings.
The model can be stored/loaded via its :meth:`~gensim.models.poincare.PoincareModel.save`
and :meth:`~gensim.models.poincare.PoincareModel.load` methods, or stored/loaded in the word2vec format
via `model.kv.save_word2vec_format` and :meth:`~gensim.models.poincare.PoincareKeyedVectors.load_word2vec_format`.
Notes
-----
Training cannot be resumed from a model loaded via `load_word2vec_format`, if you wish to train further,
use :meth:`~gensim.models.poincare.PoincareModel.save` and :meth:`~gensim.models.poincare.PoincareModel.load`
methods instead.
An important attribute (that provides a lot of additional functionality when directly accessed) are the
keyed vectors:
self.kv : :class:`~gensim.models.poincare.PoincareKeyedVectors`
This object essentially contains the mapping between nodes and embeddings, as well the vocabulary of the model
(set of unique nodes seen by the model). After training, it can be used to perform operations on the vectors
such as vector lookup, distance and similarity calculations etc.
See the documentation of its class for usage examples.
"""
    def __init__(self, train_data, size=50, alpha=0.1, negative=10, workers=1, epsilon=1e-5, regularization_coeff=1.0,
                 burn_in=10, burn_in_alpha=0.01, init_range=(-0.001, 0.001), dtype=np.float64, seed=0):
        """Initialize and train a Poincare embedding model from an iterable of relations.

        Parameters
        ----------
        train_data : {iterable of (str, str), :class:`gensim.models.poincare.PoincareRelations`}
            Iterable of relations, e.g. a list of tuples, or a :class:`gensim.models.poincare.PoincareRelations`
            instance streaming from a file. Note that the relations are treated as ordered pairs,
            i.e. a relation (a, b) does not imply the opposite relation (b, a). In case the relations are symmetric,
            the data should contain both relations (a, b) and (b, a).
        size : int, optional
            Number of dimensions of the trained model.
        alpha : float, optional
            Learning rate for training.
        negative : int, optional
            Number of negative samples to use.
        workers : int, optional
            Number of threads to use for training the model.
        epsilon : float, optional
            Constant used for clipping embeddings below a norm of one.
        regularization_coeff : float, optional
            Coefficient used for l2-regularization while training (0 effectively disables regularization).
        burn_in : int, optional
            Number of epochs to use for burn-in initialization (0 means no burn-in).
        burn_in_alpha : float, optional
            Learning rate for burn-in initialization, ignored if `burn_in` is 0.
        init_range : 2-tuple (float, float)
            Range within which the vectors are randomly initialized.
        dtype : numpy.dtype
            The numpy dtype to use for the vectors in the model (numpy.float64, numpy.float32 etc).
            Using lower precision floats may be useful in increasing training speed and reducing memory usage.
        seed : int, optional
            Seed for random to ensure reproducibility.

        Examples
        --------
        Initialize a model from a list:

        .. sourcecode:: pycon

            >>> from gensim.models.poincare import PoincareModel
            >>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
            >>> model = PoincareModel(relations, negative=2)

        Initialize a model from a file containing one relation per line:

        .. sourcecode:: pycon

            >>> from gensim.models.poincare import PoincareModel, PoincareRelations
            >>> from gensim.test.utils import datapath
            >>> file_path = datapath('poincare_hypernyms.tsv')
            >>> model = PoincareModel(PoincareRelations(file_path), negative=2)

        See :class:`~gensim.models.poincare.PoincareRelations` for more options.

        """
        self.train_data = train_data
        self.kv = PoincareKeyedVectors(size)
        # list of (node_1_index, node_2_index) pairs; filled by build_vocab()
        self.all_relations = []
        # node index -> set of indices it has a relation to; filled by build_vocab()
        self.node_relations = defaultdict(set)
        # buffer of pre-sampled negative candidates, refilled lazily
        self._negatives_buffer = NegativesBuffer([])
        self._negatives_buffer_size = 2000
        self.size = size
        self.train_alpha = alpha  # Learning rate for training
        self.burn_in_alpha = burn_in_alpha  # Learning rate for burn-in
        self.alpha = alpha  # Current learning rate
        self.negative = negative
        self.workers = workers
        self.epsilon = epsilon
        self.regularization_coeff = regularization_coeff
        self.burn_in = burn_in
        self._burn_in_done = False
        self.dtype = dtype
        self.seed = seed
        # dedicated RandomState so runs are reproducible for a given seed
        self._np_random = np_random.RandomState(seed)
        self.init_range = init_range
        self._loss_grad = None
        self.build_vocab(train_data)
    def build_vocab(self, relations, update=False):
        """Build the model's vocabulary from known relations.

        Parameters
        ----------
        relations : {iterable of (str, str), :class:`gensim.models.poincare.PoincareRelations`}
            Iterable of relations, e.g. a list of tuples, or a :class:`gensim.models.poincare.PoincareRelations`
            instance streaming from a file. Note that the relations are treated as ordered pairs,
            i.e. a relation (a, b) does not imply the opposite relation (b, a). In case the relations are symmetric,
            the data should contain both relations (a, b) and (b, a).
        update : bool, optional
            If true, only new nodes's embeddings are initialized.
            Use this when the model already has an existing vocabulary and you want to update it.
            If false, all node's embeddings are initialized.
            Use this when you're creating a new vocabulary from scratch.

        Examples
        --------
        Train a model and update vocab for online training:

        .. sourcecode:: pycon

            >>> from gensim.models.poincare import PoincareModel
            >>>
            >>> # train a new model from initial data
            >>> initial_relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal')]
            >>> model = PoincareModel(initial_relations, negative=1)
            >>> model.train(epochs=50)
            >>>
            >>> # online training: update the vocabulary and continue training
            >>> online_relations = [('striped_skunk', 'mammal')]
            >>> model.build_vocab(online_relations, update=True)
            >>> model.train(epochs=50)

        """
        # remember where the old vocab ended so only new vectors get
        # initialized in the update=True case
        old_index2word_len = len(self.kv.index2word)

        logger.info("loading relations from train data..")
        for relation in relations:
            if len(relation) != 2:
                raise ValueError('Relation pair "%s" should have exactly two items' % repr(relation))
            for item in relation:
                if item in self.kv.vocab:
                    # seen before - just bump its frequency count
                    self.kv.vocab[item].count += 1
                else:
                    self.kv.vocab[item] = Vocab(count=1, index=len(self.kv.index2word))
                    self.kv.index2word.append(item)
            node_1, node_2 = relation
            node_1_index, node_2_index = self.kv.vocab[node_1].index, self.kv.vocab[node_2].index
            # record the directed relation by index for fast negative sampling
            self.node_relations[node_1_index].add(node_2_index)
            relation = (node_1_index, node_2_index)
            self.all_relations.append(relation)
        logger.info("loaded %d relations from train data, %d nodes", len(self.all_relations), len(self.kv.vocab))
        self.indices_set = set(range(len(self.kv.index2word)))  # Set of all node indices
        self.indices_array = np.fromiter(range(len(self.kv.index2word)), dtype=int)  # Numpy array of all node indices
        self._init_node_probabilities()

        if not update:
            self._init_embeddings()
        else:
            self._update_embeddings(old_index2word_len)
def _init_embeddings(self):
"""Randomly initialize vectors for the items in the vocab."""
shape = (len(self.kv.index2word), self.size)
self.kv.syn0 = self._np_random.uniform(self.init_range[0], self.init_range[1], shape).astype(self.dtype)
def _update_embeddings(self, old_index2word_len):
"""Randomly initialize vectors for the items in the additional vocab."""
shape = (len(self.kv.index2word) - old_index2word_len, self.size)
v = self._np_random.uniform(self.init_range[0], self.init_range[1], shape).astype(self.dtype)
self.kv.syn0 = np.concatenate([self.kv.syn0, v])
def _init_node_probabilities(self):
"""Initialize a-priori probabilities."""
counts = np.fromiter((
self.kv.vocab[self.kv.index2word[i]].count
for i in range(len(self.kv.index2word))
),
dtype=np.float64, count=len(self.kv.index2word))
self._node_counts_cumsum = np.cumsum(counts)
self._node_probabilities = counts / counts.sum()
def _get_candidate_negatives(self):
"""Get candidate negatives of size `self.negative` from the negative examples buffer.
Returns
-------
numpy.array
Array of shape (`self.negative`,) containing indices of negative nodes.
"""
if self._negatives_buffer.num_items() < self.negative:
# cumsum table of counts used instead of the standard approach of a probability cumsum table
# this is to avoid floating point errors that result when the number of nodes is very high
# for reference: https://github.com/RaRe-Technologies/gensim/issues/1917
max_cumsum_value = self._node_counts_cumsum[-1]
uniform_numbers = self._np_random.randint(1, max_cumsum_value + 1, self._negatives_buffer_size)
cumsum_table_indices = np.searchsorted(self._node_counts_cumsum, uniform_numbers)
self._negatives_buffer = NegativesBuffer(cumsum_table_indices)
return self._negatives_buffer.get_items(self.negative)
    def _sample_negatives(self, node_index):
        """Get a sample of negatives for the given node.
        Parameters
        ----------
        node_index : int
            Index of the positive node for which negative samples are to be returned.
        Returns
        -------
        list of int
            List of length `self.negative` containing indices of negative nodes for the given node index.
        Raises
        ------
        ValueError
            If the vocab contains fewer than `self.negative` nodes not positively related to `node_index`.
        """
        node_relations = self.node_relations[node_index]
        num_remaining_nodes = len(self.kv.vocab) - len(node_relations)
        if num_remaining_nodes < self.negative:
            raise ValueError(
                'Cannot sample %d negative nodes from a set of %d negative nodes for %s' %
                (self.negative, num_remaining_nodes, self.kv.index2word[node_index])
            )
        positive_fraction = float(len(node_relations)) / len(self.kv.vocab)
        if positive_fraction < 0.01:
            # If number of positive relations is a small fraction of total nodes
            # re-sample till no positively connected nodes are chosen
            indices = self._get_candidate_negatives()
            unique_indices = set(indices)
            times_sampled = 1
            # Rejection sampling: redraw the entire candidate set until it has no repeats
            # and shares no member with the node's positive relations. Kept as a plain
            # loop because the RNG draw order determines reproducibility of training.
            while (len(indices) != len(unique_indices)) or (unique_indices & node_relations):
                times_sampled += 1
                indices = self._get_candidate_negatives()
                unique_indices = set(indices)
            if times_sampled > 1:
                logger.debug('sampled %d times, positive fraction %.5f', times_sampled, positive_fraction)
        else:
            # If number of positive relations is a significant fraction of total nodes
            # subtract positively connected nodes from set of choices and sample from the remaining
            valid_negatives = np.array(list(self.indices_set - node_relations))
            probs = self._node_probabilities[valid_negatives]
            probs /= probs.sum()  # renormalize over the remaining (valid) candidates
            indices = self._np_random.choice(valid_negatives, size=self.negative, p=probs, replace=False)
        return list(indices)
    @staticmethod
    def _loss_fn(matrix, regularization_coeff=1.0):
        """Computes loss value.
        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
            Row 0 is `u`, row 1 the positive example `v`, and the remaining rows the negatives.
        regularization_coeff : float, optional
            Coefficient to use for l2-regularization
        Returns
        -------
        float
            Computed loss value.
        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        All array operations must go through autograd's numpy wrapper (`grad_np`) so the
        computation graph can be traced; do not replace them with plain numpy calls.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]
        euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
        norm = grad_np.linalg.norm(vector_u)
        all_norms = grad_np.linalg.norm(vectors_v, axis=1)
        # Poincare distance: arccosh(1 + 2 * ||u - v||^2 / ((1 - ||u||^2) * (1 - ||v||^2)))
        poincare_dists = grad_np.arccosh(
            1 + 2 * (
                (euclidean_dists ** 2) / ((1 - norm ** 2) * (1 - all_norms ** 2))
            )
        )
        exp_negative_distances = grad_np.exp(-poincare_dists)
        # L2-penalty applies only to the positive example's vector (row 0 of vectors_v).
        regularization_term = regularization_coeff * grad_np.linalg.norm(vectors_v[0]) ** 2
        # Negative log of the softmax (over negated distances) taken at the positive pair.
        return -grad_np.log(exp_negative_distances[0] / (exp_negative_distances.sum())) + regularization_term
@staticmethod
def _clip_vectors(vectors, epsilon):
"""Clip vectors to have a norm of less than one.
Parameters
----------
vectors : numpy.array
Can be 1-D, or 2-D (in which case the norm for each row is checked).
epsilon : float
Parameter for numerical stability, each dimension of the vector is reduced by `epsilon`
if the norm of the vector is greater than or equal to 1.
Returns
-------
numpy.array
Array with norms clipped below 1.
"""
one_d = len(vectors.shape) == 1
threshold = 1 - epsilon
if one_d:
norm = np.linalg.norm(vectors)
if norm < threshold:
return vectors
else:
return vectors / norm - (np.sign(vectors) * epsilon)
else:
norms = np.linalg.norm(vectors, axis=1)
if (norms < threshold).all():
return vectors
else:
vectors[norms >= threshold] *= (threshold / norms[norms >= threshold])[:, np.newaxis]
vectors[norms >= threshold] -= np.sign(vectors[norms >= threshold]) * epsilon
return vectors
def save(self, *args, **kwargs):
"""Save complete model to disk, inherited from :class:`~gensim.utils.SaveLoad`.
See also
--------
:meth:`~gensim.models.poincare.PoincareModel.load`
Parameters
----------
*args
Positional arguments passed to :meth:`~gensim.utils.SaveLoad.save`.
**kwargs
Keyword arguments passed to :meth:`~gensim.utils.SaveLoad.save`.
"""
self._loss_grad = None # Can't pickle autograd fn to disk
attrs_to_ignore = ['_node_probabilities', '_node_counts_cumsum']
kwargs['ignore'] = set(list(kwargs.get('ignore', [])) + attrs_to_ignore)
super(PoincareModel, self).save(*args, **kwargs)
@classmethod
def load(cls, *args, **kwargs):
"""Load model from disk, inherited from :class:`~gensim.utils.SaveLoad`.
See also
--------
:meth:`~gensim.models.poincare.PoincareModel.save`
Parameters
----------
*args
Positional arguments passed to :meth:`~gensim.utils.SaveLoad.load`.
**kwargs
Keyword arguments passed to :meth:`~gensim.utils.SaveLoad.load`.
Returns
-------
:class:`~gensim.models.poincare.PoincareModel`
The loaded model.
"""
model = super(PoincareModel, cls).load(*args, **kwargs)
model._init_node_probabilities()
return model
    def _prepare_training_batch(self, relations, all_negatives, check_gradients=False):
        """Create a training batch and compute gradients and loss for the batch.
        Parameters
        ----------
        relations : list of tuples
            List of tuples of positive examples of the form (node_1_index, node_2_index).
        all_negatives : list of lists
            List of lists of negative samples for each node_1 in the positive examples.
        check_gradients : bool, optional
            Whether to compare the computed gradients to autograd gradients for this batch.
        Returns
        -------
        :class:`~gensim.models.poincare.PoincareBatch`
            Node indices, computed gradients and loss for the batch.
        """
        batch_size = len(relations)
        indices_u, indices_v = [], []
        for relation, negatives in zip(relations, all_negatives):
            u, v = relation
            indices_u.append(u)
            # For each u, indices_v holds its positive node followed by its negatives,
            # i.e. (1 + self.negative) entries per relation, in relation order.
            indices_v.append(v)
            indices_v.extend(negatives)
        vectors_u = self.kv.syn0[indices_u]  # (batch_size, dim)
        # Regroup the flat v-vectors per relation, then move axes so the final layout is
        # (1 + negative, dim, batch_size), which is the layout PoincareBatch expects.
        vectors_v = self.kv.syn0[indices_v].reshape((batch_size, 1 + self.negative, self.size))
        vectors_v = vectors_v.swapaxes(0, 1).swapaxes(1, 2)
        batch = PoincareBatch(vectors_u, vectors_v, indices_u, indices_v, self.regularization_coeff)
        batch.compute_all()
        if check_gradients:
            self._check_gradients(relations, all_negatives, batch)
        return batch
def _check_gradients(self, relations, all_negatives, batch, tol=1e-8):
"""Compare computed gradients for batch to autograd gradients.
Parameters
----------
relations : list of tuples
List of tuples of positive examples of the form (node_1_index, node_2_index).
all_negatives : list of lists
List of lists of negative samples for each node_1 in the positive examples.
batch : :class:`~gensim.models.poincare.PoincareBatch`
Batch for which computed gradients are to be checked.
tol : float, optional
The maximum error between our computed gradients and the reference ones from autograd.
"""
if not AUTOGRAD_PRESENT:
logger.warning('autograd could not be imported, cannot do gradient checking')
logger.warning('please install autograd to enable gradient checking')
return
if self._loss_grad is None:
self._loss_grad = grad(PoincareModel._loss_fn)
max_diff = 0.0
for i, (relation, negatives) in enumerate(zip(relations, all_negatives)):
u, v = relation
auto_gradients = self._loss_grad(
np.vstack((self.kv.syn0[u], self.kv.syn0[[v] + negatives])), self.regularization_coeff)
computed_gradients = np.vstack((batch.gradients_u[:, i], batch.gradients_v[:, :, i]))
diff = np.abs(auto_gradients - computed_gradients).max()
if diff > max_diff:
max_diff = diff
logger.info('max difference between computed gradients and autograd gradients: %.10f', max_diff)
assert max_diff < tol, (
'Max difference between computed gradients and autograd gradients %.10f, '
'greater than tolerance %.10f' % (max_diff, tol))
def _sample_negatives_batch(self, nodes):
"""Get negative examples for each node.
Parameters
----------
nodes : iterable of int
Iterable of node indices for which negative samples are to be returned.
Returns
-------
list of lists
Each inner list is a list of negative samples for a single node in the input list.
"""
all_indices = [self._sample_negatives(node) for node in nodes]
return all_indices
def _train_on_batch(self, relations, check_gradients=False):
"""Perform training for a single training batch.
Parameters
----------
relations : list of tuples of (int, int)
List of tuples of positive examples of the form (node_1_index, node_2_index).
check_gradients : bool, optional
Whether to compare the computed gradients to autograd gradients for this batch.
Returns
-------
:class:`~gensim.models.poincare.PoincareBatch`
The batch that was just trained on, contains computed loss for the batch.
"""
all_negatives = self._sample_negatives_batch(relation[0] for relation in relations)
batch = self._prepare_training_batch(relations, all_negatives, check_gradients)
self._update_vectors_batch(batch)
return batch
@staticmethod
def _handle_duplicates(vector_updates, node_indices):
"""Handle occurrences of multiple updates to the same node in a batch of vector updates.
Parameters
----------
vector_updates : numpy.array
Array with each row containing updates to be performed on a certain node.
node_indices : list of int
Node indices on which the above updates are to be performed on.
Notes
-----
Mutates the `vector_updates` array.
Required because vectors[[2, 1, 2]] += np.array([-0.5, 1.0, 0.5]) performs only the last update
on the row at index 2.
"""
counts = Counter(node_indices)
node_dict = defaultdict(list)
for i, node_index in enumerate(node_indices):
node_dict[node_index].append(i)
for node_index, count in counts.items():
if count == 1:
continue
positions = node_dict[node_index]
# Move all updates to the same node to the last such update, zeroing all the others
vector_updates[positions[-1]] = vector_updates[positions].sum(axis=0)
vector_updates[positions[:-1]] = 0
    def _update_vectors_batch(self, batch):
        """Update vectors for nodes in the given batch.
        Parameters
        ----------
        batch : :class:`~gensim.models.poincare.PoincareBatch`
            Batch containing computed gradients and node indices of the batch for which updates are to be done.
        """
        grad_u, grad_v = batch.gradients_u, batch.gradients_v
        indices_u, indices_v = batch.indices_u, batch.indices_v
        batch_size = len(indices_u)
        # Riemannian update for u: Euclidean gradient rescaled by (1 - ||u||^2)^2 / 4
        # (batch.alpha == 1 - ||u||^2), then transposed to one row per index in indices_u.
        u_updates = (self.alpha * (batch.alpha ** 2) / 4 * grad_u).T
        self._handle_duplicates(u_updates, indices_u)
        self.kv.syn0[indices_u] -= u_updates
        # Re-project updated vectors back inside the unit ball.
        self.kv.syn0[indices_u] = self._clip_vectors(self.kv.syn0[indices_u], self.epsilon)
        # Same Riemannian rescaling on the v-side (batch.beta == 1 - ||v||^2). Note the u-side
        # writes above happen first; the v-side then reads/writes syn0 rows via indices_v.
        v_updates = self.alpha * (batch.beta ** 2)[:, np.newaxis] / 4 * grad_v
        # Flatten (1 + negative, dim, batch_size) -> ((1 + negative) * batch_size, dim),
        # matching the per-relation (positive, negatives...) ordering of indices_v.
        v_updates = v_updates.swapaxes(1, 2).swapaxes(0, 1)
        v_updates = v_updates.reshape(((1 + self.negative) * batch_size, self.size))
        self._handle_duplicates(v_updates, indices_v)
        self.kv.syn0[indices_v] -= v_updates
        self.kv.syn0[indices_v] = self._clip_vectors(self.kv.syn0[indices_v], self.epsilon)
def train(self, epochs, batch_size=10, print_every=1000, check_gradients_every=None):
"""Train Poincare embeddings using loaded data and model parameters.
Parameters
----------
epochs : int
Number of iterations (epochs) over the corpus.
batch_size : int, optional
Number of examples to train on in a single batch.
print_every : int, optional
Prints progress and average loss after every `print_every` batches.
check_gradients_every : int or None, optional
Compares computed gradients and autograd gradients after every `check_gradients_every` batches.
Useful for debugging, doesn't compare by default.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
>>> model.train(epochs=50)
"""
if self.workers > 1:
raise NotImplementedError("Multi-threaded version not implemented yet")
# Some divide-by-zero results are handled explicitly
old_settings = np.seterr(divide='ignore', invalid='ignore')
logger.info(
"training model of size %d with %d workers on %d relations for %d epochs and %d burn-in epochs, "
"using lr=%.5f burn-in lr=%.5f negative=%d",
self.size, self.workers, len(self.all_relations), epochs, self.burn_in,
self.alpha, self.burn_in_alpha, self.negative
)
if self.burn_in > 0 and not self._burn_in_done:
logger.info("starting burn-in (%d epochs)----------------------------------------", self.burn_in)
self.alpha = self.burn_in_alpha
self._train_batchwise(
epochs=self.burn_in, batch_size=batch_size, print_every=print_every,
check_gradients_every=check_gradients_every)
self._burn_in_done = True
logger.info("burn-in finished")
self.alpha = self.train_alpha
logger.info("starting training (%d epochs)----------------------------------------", epochs)
self._train_batchwise(
epochs=epochs, batch_size=batch_size, print_every=print_every,
check_gradients_every=check_gradients_every)
logger.info("training finished")
np.seterr(**old_settings)
    def _train_batchwise(self, epochs, batch_size=10, print_every=1000, check_gradients_every=None):
        """Train Poincare embeddings using specified parameters.
        Parameters
        ----------
        epochs : int
            Number of iterations (epochs) over the corpus.
        batch_size : int, optional
            Number of examples to train on in a single batch.
        print_every : int, optional
            Prints progress and average loss after every `print_every` batches.
        check_gradients_every : int or None, optional
            Compares computed gradients and autograd gradients after every `check_gradients_every` batches.
            Useful for debugging, doesn't compare by default.
        """
        if self.workers > 1:
            raise NotImplementedError("Multi-threaded version not implemented yet")
        for epoch in range(1, epochs + 1):
            # Visit the relations in a fresh random order each epoch.
            indices = list(range(len(self.all_relations)))
            self._np_random.shuffle(indices)
            avg_loss = 0.0
            last_time = time.time()
            # batch_num is 1-based so the print/gradient-check cadences skip the first batch.
            for batch_num, i in enumerate(range(0, len(indices), batch_size), start=1):
                should_print = not (batch_num % print_every)
                check_gradients = bool(check_gradients_every) and (batch_num % check_gradients_every) == 0
                batch_indices = indices[i:i + batch_size]
                relations = [self.all_relations[idx] for idx in batch_indices]
                result = self._train_on_batch(relations, check_gradients=check_gradients)
                avg_loss += result.loss
                if should_print:
                    # Report the mean loss over the last `print_every` batches, then reset
                    # both the accumulator and the timing window.
                    avg_loss /= print_every
                    time_taken = time.time() - last_time
                    speed = print_every * batch_size / time_taken
                    logger.info(
                        'training on epoch %d, examples #%d-#%d, loss: %.2f'
                        % (epoch, i, i + batch_size, avg_loss))
                    logger.info(
                        'time taken for %d examples: %.2f s, %.2f examples / s'
                        % (print_every * batch_size, time_taken, speed))
                    last_time = time.time()
                    avg_loss = 0.0
class PoincareBatch(object):
    """Compute Poincare distances, gradients and loss for a training batch.
    Store intermediate state to avoid recomputing multiple times.
    """
    def __init__(self, vectors_u, vectors_v, indices_u, indices_v, regularization_coeff=1.0):
        """
        Initialize instance with sets of vectors for which distances are to be computed.
        Parameters
        ----------
        vectors_u : numpy.array
            Vectors of all nodes `u` in the batch. Expected shape (batch_size, dim).
        vectors_v : numpy.array
            Vectors of all positively related nodes `v` and negatively sampled nodes `v'`,
            for each node `u` in the batch. Expected shape (1 + neg_size, dim, batch_size).
        indices_u : list of int
            List of node indices for each of the vectors in `vectors_u`.
        indices_v : list of lists of int
            Nested list of lists, each of which is a list of node indices
            for each of the vectors in `vectors_v` for a specific node `u`.
        regularization_coeff : float, optional
            Coefficient to use for l2-regularization
        """
        self.vectors_u = vectors_u.T[np.newaxis, :, :]  # (1, dim, batch_size)
        self.vectors_v = vectors_v  # (1 + neg_size, dim, batch_size)
        self.indices_u = indices_u
        self.indices_v = indices_v
        self.regularization_coeff = regularization_coeff
        # Intermediate results, filled in lazily by the compute_* methods below.
        self.poincare_dists = None
        self.euclidean_dists = None
        self.norms_u = None
        self.norms_v = None
        self.alpha = None
        self.beta = None
        self.gamma = None
        self.gradients_u = None
        self.distance_gradients_u = None
        self.gradients_v = None
        self.distance_gradients_v = None
        self.loss = None
        # Memoization flags guarding each compute_* method against repeated work.
        self._distances_computed = False
        self._gradients_computed = False
        self._distance_gradients_computed = False
        self._loss_computed = False
    def compute_all(self):
        """Convenience method to perform all computations."""
        self.compute_distances()
        self.compute_distance_gradients()
        self.compute_gradients()
        self.compute_loss()
    def compute_distances(self):
        """Compute and store norms, euclidean distances and poincare distances between input vectors."""
        if self._distances_computed:
            return
        euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        norms_u = np.linalg.norm(self.vectors_u, axis=1)  # (1, batch_size)
        norms_v = np.linalg.norm(self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        alpha = 1 - norms_u ** 2  # (1, batch_size)
        beta = 1 - norms_v ** 2  # (1 + neg_size, batch_size)
        # gamma is the arccosh argument of the Poincare distance:
        # d(u, v) = arccosh(1 + 2 * ||u - v||^2 / ((1 - ||u||^2) * (1 - ||v||^2)))
        gamma = 1 + 2 * (
            (euclidean_dists ** 2) / (alpha * beta)
        )  # (1 + neg_size, batch_size)
        poincare_dists = np.arccosh(gamma)  # (1 + neg_size, batch_size)
        exp_negative_distances = np.exp(-poincare_dists)  # (1 + neg_size, batch_size)
        Z = exp_negative_distances.sum(axis=0)  # (batch_size)
        self.euclidean_dists = euclidean_dists
        self.poincare_dists = poincare_dists
        self.exp_negative_distances = exp_negative_distances
        self.Z = Z
        self.gamma = gamma
        self.norms_u = norms_u
        self.norms_v = norms_v
        self.alpha = alpha
        self.beta = beta
        # NOTE(review): gamma was already stored above; this repeat is redundant but harmless.
        self.gamma = gamma
        self._distances_computed = True
    def compute_gradients(self):
        """Compute and store gradients of loss function for all input vectors."""
        if self._gradients_computed:
            return
        self.compute_distances()
        self.compute_distance_gradients()
        # Softmax-weighted distance gradients for every v (positive and negatives).
        # (1 + neg_size, dim, batch_size)
        gradients_v = -self.exp_negative_distances[:, np.newaxis, :] * self.distance_gradients_v
        gradients_v /= self.Z  # (1 + neg_size, dim, batch_size)
        gradients_v[0] += self.distance_gradients_v[0]
        # The l2-regularization term contributes only to the positive example's vector (row 0).
        gradients_v[0] += self.regularization_coeff * 2 * self.vectors_v[0]
        # (1 + neg_size, dim, batch_size)
        gradients_u = -self.exp_negative_distances[:, np.newaxis, :] * self.distance_gradients_u
        gradients_u /= self.Z  # (1 + neg_size, dim, batch_size)
        gradients_u = gradients_u.sum(axis=0)  # (dim, batch_size)
        gradients_u += self.distance_gradients_u[0]
        assert not np.isnan(gradients_u).any()
        assert not np.isnan(gradients_v).any()
        self.gradients_u = gradients_u
        self.gradients_v = gradients_v
        self._gradients_computed = True
    def compute_distance_gradients(self):
        """Compute and store partial derivatives of poincare distance d(u, v) w.r.t all u and all v."""
        if self._distance_gradients_computed:
            return
        self.compute_distances()
        euclidean_dists_squared = self.euclidean_dists ** 2  # (1 + neg_size, batch_size)
        # Coefficient shared by both partial derivatives; diverges when gamma == 1 (handled below).
        # (1 + neg_size, 1, batch_size)
        c_ = (4 / (self.alpha * self.beta * np.sqrt(self.gamma ** 2 - 1)))[:, np.newaxis, :]
        # (1 + neg_size, 1, batch_size)
        u_coeffs = ((euclidean_dists_squared + self.alpha) / self.alpha)[:, np.newaxis, :]
        distance_gradients_u = u_coeffs * self.vectors_u - self.vectors_v  # (1 + neg_size, dim, batch_size)
        distance_gradients_u *= c_  # (1 + neg_size, dim, batch_size)
        # gamma == 1 means zero distance (u == v); c_ divided by zero there, so zero those gradients.
        nan_gradients = self.gamma == 1  # (1 + neg_size, batch_size)
        if nan_gradients.any():
            distance_gradients_u.swapaxes(1, 2)[nan_gradients] = 0
        self.distance_gradients_u = distance_gradients_u
        # (1 + neg_size, 1, batch_size)
        v_coeffs = ((euclidean_dists_squared + self.beta) / self.beta)[:, np.newaxis, :]
        distance_gradients_v = v_coeffs * self.vectors_v - self.vectors_u  # (1 + neg_size, dim, batch_size)
        distance_gradients_v *= c_  # (1 + neg_size, dim, batch_size)
        if nan_gradients.any():
            distance_gradients_v.swapaxes(1, 2)[nan_gradients] = 0
        self.distance_gradients_v = distance_gradients_v
        self._distance_gradients_computed = True
    def compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self.compute_distances()
        # Batch loss: sum over examples of -log(exp(-d(u, v_pos)) / Z).
        self.loss = -np.log(self.exp_negative_distances[0] / self.Z).sum()  # scalar
        self._loss_computed = True
class PoincareKeyedVectors(BaseKeyedVectors):
"""Vectors and vocab for the :class:`~gensim.models.poincare.PoincareModel` training class.
Used to perform operations on the vectors such as vector lookup, distance calculations etc.
"""
    def __init__(self, vector_size):
        """Initialize empty vocab/vector storage for `vector_size`-dimensional embeddings."""
        super(PoincareKeyedVectors, self).__init__(vector_size)
        self.max_distance = 0  # NOTE(review): not updated anywhere in this chunk -- confirm intended use.
        self.index2word = []
        self.vocab = {}
    @property
    def vectors(self):
        # Compatibility alias: the embedding matrix is stored as `syn0`.
        return self.syn0
    @vectors.setter
    def vectors(self, value):
        self.syn0 = value
    @property
    def index2entity(self):
        # Compatibility alias for the generic KeyedVectors naming.
        return self.index2word
    @index2entity.setter
    def index2entity(self, value):
        self.index2word = value
    def word_vec(self, word):
        """Get the word's representations in vector space, as a 1D numpy array.
        Parameters
        ----------
        word : str
            Key of the node/word to look up.
        Returns
        -------
        numpy.array
            1D array with the vector for `word`.
        Examples
        --------
        .. sourcecode:: pycon
            >>> from gensim.test.utils import datapath
            >>>
            >>> # Read the sample relations file and train the model
            >>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
            >>> model = PoincareModel(train_data=relations)
            >>> model.train(epochs=50)
            >>>
            >>> # Query the trained model.
            >>> wv = model.kv.word_vec('kangaroo.n.01')
        """
        # Delegates to the base KeyedVectors lookup.
        return super(PoincareKeyedVectors, self).get_vector(word)
    def words_closer_than(self, w1, w2):
        """Get all words that are closer to `w1` than `w2` is to `w1`.
        Parameters
        ----------
        w1 : str
            Input word.
        w2 : str
            Input word.
        Returns
        -------
        list (str)
            List of words that are closer to `w1` than `w2` is to `w1`.
        Examples
        --------
        .. sourcecode:: pycon
            >>> from gensim.test.utils import datapath
            >>>
            >>> # Read the sample relations file and train the model
            >>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
            >>> model = PoincareModel(train_data=relations)
            >>> model.train(epochs=50)
            >>>
            >>> # Which term is closer to 'kangaroo' than 'metatherian' is to 'kangaroo'?
            >>> model.kv.words_closer_than('kangaroo.n.01', 'metatherian.n.01')
            [u'marsupial.n.01', u'phalanger.n.01']
        """
        # Delegates to the base KeyedVectors implementation.
        return super(PoincareKeyedVectors, self).closer_than(w1, w2)
    def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
        """Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility, using :func:`~gensim.models.utils_any2vec._save_word2vec_format`.
        Parameters
        ----------
        fname : str
            Path to file that will be used for storing.
        fvocab : str, optional
            File path used to save the vocabulary.
        binary : bool, optional
            If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
        total_vec : int, optional
            Explicitly specify total number of vectors
            (in case word vectors are appended with document vectors afterwards).
        """
        _save_word2vec_format(fname, self.vocab, self.syn0, fvocab=fvocab, binary=binary, total_vec=total_vec)
    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                             limit=None, datatype=REAL):
        """Load the input-hidden weight matrix from the original C word2vec-tool format.
        Use :func:`~gensim.models.utils_any2vec._load_word2vec_format`.
        Note that the information stored in the file is incomplete (the binary tree is missing),
        so while you can query for word similarity etc., you cannot continue training
        with a model loaded this way.
        Parameters
        ----------
        fname : str
            The file path to the saved word2vec-format file.
        fvocab : str, optional
            File path to the vocabulary. Word counts are read from `fvocab` filename, if set
            (this is the file generated by `-save-vocab` flag of the original C tool).
        binary : bool, optional
            If True, indicates whether the data is in binary word2vec format.
        encoding : str, optional
            If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`.
        unicode_errors : str, optional
            default 'strict', is a string suitable to be passed as the `errors`
            argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
            file may include word tokens truncated in the middle of a multibyte unicode character
            (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
        limit : int, optional
            Sets a maximum number of word-vectors to read from the file. The default,
            None, means read all.
        datatype : type, optional
            (Experimental) Can coerce dimensions to a non-default float type (such as `np.float16`) to save memory.
            Such types may result in much slower bulk operations or incompatibility with optimized routines.)
        Returns
        -------
        :class:`~gensim.models.poincare.PoincareKeyedVectors`
            The loaded keyed vectors (an instance of `cls`; the original docstring's claim of
            a full PoincareModel was inaccurate -- no trainable model state is restored).
        """
        return _load_word2vec_format(
            cls, fname, fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
            limit=limit, datatype=datatype)
    @staticmethod
    def vector_distance(vector_1, vector_2):
        """Compute poincare distance between two input vectors. Convenience method over `vector_distance_batch`.
        Parameters
        ----------
        vector_1 : numpy.array
            Input vector.
        vector_2 : numpy.array
            Input vector.
        Returns
        -------
        numpy.float
            Poincare distance between `vector_1` and `vector_2`.
        """
        # Wrap vector_2 as a one-row batch and take the single resulting distance.
        return PoincareKeyedVectors.vector_distance_batch(vector_1, vector_2[np.newaxis, :])[0]
@staticmethod
def vector_distance_batch(vector_1, vectors_all):
"""Compute poincare distances between one vector and a set of other vectors.
Parameters
----------
vector_1 : numpy.array
vector from which Poincare distances are to be computed, expected shape (dim,).
vectors_all : numpy.array
for each row in vectors_all, distance from vector_1 is computed, expected shape (num_vectors, dim).
Returns
-------
numpy.array
Poincare distance between `vector_1` and each row in `vectors_all`, shape (num_vectors,).
"""
euclidean_dists = np.linalg.norm(vector_1 - vectors_all, axis=1)
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
return np.arccosh(
1 + 2 * (
(euclidean_dists ** 2) / ((1 - norm ** 2) * (1 - all_norms ** 2))
)
)
def closest_child(self, node):
"""Get the node closest to `node` that is lower in the hierarchy than `node`.
Parameters
----------
node : {str, int}
Key for node for which closest child is to be found.
Returns
-------
{str, None}
Node closest to `node` that is lower in the hierarchy than `node`.
If there are no nodes lower in the hierarchy, None is returned.
"""
all_distances = self.distances(node)
all_norms = np.linalg.norm(self.syn0, axis=1)
node_norm = all_norms[self.vocab[node].index]
mask = node_norm >= all_norms
if mask.all(): # No nodes lower in the hierarchy
return None
all_distances = np.ma.array(all_distances, mask=mask)
closest_child_index = np.ma.argmin(all_distances)
return self.index2word[closest_child_index]
def closest_parent(self, node):
"""Get the node closest to `node` that is higher in the hierarchy than `node`.
Parameters
----------
node : {str, int}
Key for node for which closest parent is to be found.
Returns
-------
{str, None}
Node closest to `node` that is higher in the hierarchy than `node`.
If there are no nodes higher in the hierarchy, None is returned.
"""
all_distances = self.distances(node)
all_norms = np.linalg.norm(self.syn0, axis=1)
node_norm = all_norms[self.vocab[node].index]
mask = node_norm <= all_norms
if mask.all(): # No nodes higher in the hierarchy
return None
all_distances = np.ma.array(all_distances, mask=mask)
closest_child_index = np.ma.argmin(all_distances)
return self.index2word[closest_child_index]
def descendants(self, node, max_depth=5):
"""Get the list of recursively closest children from the given node, up to a max depth of `max_depth`.
Parameters
----------
node : {str, int}
Key for node for which descendants are to be found.
max_depth : int
Maximum number of descendants to return.
Returns
-------
list of str
Descendant nodes from the node `node`.
"""
depth = 0
descendants = []
current_node = node
while depth < max_depth:
descendants.append(self.closest_child(current_node))
current_node = descendants[-1]
depth += 1
return descendants
def ancestors(self, node):
"""Get the list of recursively closest parents from the given node.
Parameters
----------
node : {str, int}
Key for node for which ancestors are to be found.
Returns
-------
list of str
Ancestor nodes of the node `node`.
"""
ancestors = []
current_node = node
ancestor = self.closest_parent(current_node)
while ancestor is not None:
ancestors.append(ancestor)
ancestor = self.closest_parent(ancestors[-1])
return ancestors
def distance(self, w1, w2):
"""Calculate Poincare distance between vectors for nodes `w1` and `w2`.
Parameters
----------
w1 : {str, int}
Key for first node.
w2 : {str, int}
Key for second node.
Returns
-------
float
Poincare distance between the vectors for nodes `w1` and `w2`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # What is the distance between the words 'mammal' and 'carnivore'?
>>> model.kv.distance('mammal.n.01', 'carnivore.n.01')
2.9742298803339304
Raises
------
KeyError
If either of `w1` and `w2` is absent from vocab.
"""
vector_1 = self.word_vec(w1)
vector_2 = self.word_vec(w2)
return self.vector_distance(vector_1, vector_2)
def similarity(self, w1, w2):
"""Compute similarity based on Poincare distance between vectors for nodes `w1` and `w2`.
Parameters
----------
w1 : {str, int}
Key for first node.
w2 : {str, int}
Key for second node.
Returns
-------
float
Similarity between the between the vectors for nodes `w1` and `w2` (between 0 and 1).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # What is the similarity between the words 'mammal' and 'carnivore'?
>>> model.kv.similarity('mammal.n.01', 'carnivore.n.01')
0.25162107631176484
Raises
------
KeyError
If either of `w1` and `w2` is absent from vocab.
"""
return 1 / (1 + self.distance(w1, w2))
def most_similar(self, node_or_vector, topn=10, restrict_vocab=None):
"""Find the top-N most similar nodes to the given node or vector, sorted in increasing order of distance.
Parameters
----------
node_or_vector : {str, int, numpy.array}
node key or vector for which similar nodes are to be found.
topn : int or None, optional
Number of top-N similar nodes to return, when `topn` is int. When `topn` is None,
then distance for all nodes are returned.
restrict_vocab : int or None, optional
Optional integer which limits the range of vectors which are searched for most-similar values.
For example, restrict_vocab=10000 would only check the first 10000 node vectors in the vocabulary order.
This may be meaningful if vocabulary is sorted by descending frequency.
Returns
--------
list of (str, float) or numpy.array
When `topn` is int, a sequence of (node, distance) is returned in increasing order of distance.
When `topn` is None, then similarities for all words are returned as a one-dimensional numpy array with the
size of the vocabulary.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # Which words are most similar to 'kangaroo'?
>>> model.kv.most_similar('kangaroo.n.01', topn=2)
[(u'kangaroo.n.01', 0.0), (u'marsupial.n.01', 0.26524229460827725)]
"""
if isinstance(topn, Integral) and topn < 1:
return []
if not restrict_vocab:
all_distances = self.distances(node_or_vector)
else:
nodes_to_use = self.index2word[:restrict_vocab]
all_distances = self.distances(node_or_vector, nodes_to_use)
if isinstance(node_or_vector, string_types + (int,)):
node_index = self.vocab[node_or_vector].index
else:
node_index = None
if not topn:
closest_indices = matutils.argsort(all_distances)
else:
closest_indices = matutils.argsort(all_distances, topn=1 + topn)
result = [
(self.index2word[index], float(all_distances[index]))
for index in closest_indices if (not node_index or index != node_index) # ignore the input node
]
if topn:
result = result[:topn]
return result
def distances(self, node_or_vector, other_nodes=()):
"""Compute Poincare distances from given `node_or_vector` to all nodes in `other_nodes`.
If `other_nodes` is empty, return distance between `node_or_vector` and all nodes in vocab.
Parameters
----------
node_or_vector : {str, int, numpy.array}
Node key or vector from which distances are to be computed.
other_nodes : {iterable of str, iterable of int, None}, optional
For each node in `other_nodes` distance from `node_or_vector` is computed.
If None or empty, distance of `node_or_vector` from all nodes in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all nodes in `other_nodes` from input `node_or_vector`,
in the same order as `other_nodes`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # Check the distances between a word and a list of other words.
>>> model.kv.distances('mammal.n.01', ['carnivore.n.01', 'dog.n.01'])
array([2.97422988, 2.83007402])
>>> # Check the distances between a word and every other word in the vocab.
>>> all_distances = model.kv.distances('mammal.n.01')
Raises
------
KeyError
If either `node_or_vector` or any node in `other_nodes` is absent from vocab.
"""
if isinstance(node_or_vector, string_types):
input_vector = self.word_vec(node_or_vector)
else:
input_vector = node_or_vector
if not other_nodes:
other_vectors = self.syn0
else:
other_indices = [self.vocab[node].index for node in other_nodes]
other_vectors = self.syn0[other_indices]
return self.vector_distance_batch(input_vector, other_vectors)
def norm(self, node_or_vector):
"""Compute absolute position in hierarchy of input node or vector.
Values range between 0 and 1. A lower value indicates the input node or vector is higher in the hierarchy.
Parameters
----------
node_or_vector : {str, int, numpy.array}
Input node key or vector for which position in hierarchy is to be returned.
Returns
-------
float
Absolute position in the hierarchy of the input vector or node.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> # Get the norm of the embedding of the word `mammal`.
>>> model.kv.norm('mammal.n.01')
0.6423008703542398
Notes
-----
The position in hierarchy is based on the norm of the vector for the node.
"""
if isinstance(node_or_vector, string_types):
input_vector = self.word_vec(node_or_vector)
else:
input_vector = node_or_vector
return np.linalg.norm(input_vector)
def difference_in_hierarchy(self, node_or_vector_1, node_or_vector_2):
"""Compute relative position in hierarchy of `node_or_vector_1` relative to `node_or_vector_2`.
A positive value indicates `node_or_vector_1` is higher in the hierarchy than `node_or_vector_2`.
Parameters
----------
node_or_vector_1 : {str, int, numpy.array}
Input node key or vector.
node_or_vector_2 : {str, int, numpy.array}
Input node key or vector.
Returns
-------
float
Relative position in hierarchy of `node_or_vector_1` relative to `node_or_vector_2`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> # Read the sample relations file and train the model
>>> relations = PoincareRelations(file_path=datapath('poincare_hypernyms_large.tsv'))
>>> model = PoincareModel(train_data=relations)
>>> model.train(epochs=50)
>>>
>>> model.kv.difference_in_hierarchy('mammal.n.01', 'dog.n.01')
0.05382517902410999
>>> model.kv.difference_in_hierarchy('dog.n.01', 'mammal.n.01')
-0.05382517902410999
Notes
-----
The returned value can be positive or negative, depending on whether `node_or_vector_1` is higher
or lower in the hierarchy than `node_or_vector_2`.
"""
return self.norm(node_or_vector_2) - self.norm(node_or_vector_1)
class PoincareRelations(object):
    """Stream (u, v) relation pairs for `PoincareModel` from a delimiter-separated file."""

    def __init__(self, file_path, encoding='utf8', delimiter='\t'):
        """Set up a streaming reader over a file with one relation per line.

        Parameters
        ----------
        file_path : str
            Path to a file with one `u <delimiter> v` pair per line. Order matters:
            a line `u <delimiter> v` expresses the asymmetric relation "u is v",
            e.g. a tab-delimited `kangaroo	mammal` line means "kangaroo is a mammal".
        encoding : str, optional
            Character encoding of the input file.
        delimiter : str, optional
            Delimiter character separating the two nodes of each relation.
        """
        self.file_path = file_path
        self.encoding = encoding
        self.delimiter = delimiter

    def __iter__(self):
        """Yield each relation from `self.file_path` as a tuple of unicode strings."""
        is_python2 = sys.version_info[0] < 3
        with utils.open(self.file_path, 'rb') as handle:
            # csv.reader wants raw bytestrings under Python 2, unicode under Python 3.
            if is_python2:
                decoded_lines = handle
            else:
                decoded_lines = (raw_line.decode(self.encoding) for raw_line in handle)
            for row in csv.reader(decoded_lines, delimiter=self.delimiter):
                if is_python2:
                    # Decode the individual cells that csv.reader left as bytes.
                    row = [cell.decode(self.encoding) for cell in row]
                yield tuple(row)
class NegativesBuffer(object):
    """Serve pre-drawn negative samples sequentially from an in-memory buffer."""

    def __init__(self, items):
        """Store the samples and reset the read cursor to the beginning.

        Parameters
        ----------
        items : list/numpy.array
            List or array containing negative samples.
        """
        self._items = items
        self._current_index = 0

    def num_items(self):
        """Return how many buffered items have not been consumed yet.

        Returns
        -------
        int
            Number of items remaining in the buffer.
        """
        return len(self._items) - self._current_index

    def get_items(self, num_items):
        """Return the next `num_items` samples and advance the cursor.

        Parameters
        ----------
        num_items : int
            Number of items to fetch.

        Returns
        -------
        numpy.array or list
            Slice with the requested items. If fewer than `num_items` remain,
            whatever is left is returned without raising.
        """
        begin = self._current_index
        self._current_index = begin + num_items
        return self._items[begin:begin + num_items]
class ReconstructionEvaluation(object):
    """Evaluate reconstruction on given network for given embedding."""
    def __init__(self, file_path, embedding):
        """Initialize evaluation instance with tsv file containing relation pairs and embedding to be evaluated.
        Parameters
        ----------
        file_path : str
            Path to tsv file containing relation pairs.
        embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
            Embedding to be evaluated.
        """
        items = set()
        embedding_vocab = embedding.vocab
        # relations[u] is the set of v indices such that the pair (u, v) appears in the file.
        relations = defaultdict(set)
        with utils.open(file_path, 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for row in reader:
                assert len(row) == 2, 'Hypernym pair has more than two items'
                item_1_index = embedding_vocab[row[0]].index
                item_2_index = embedding_vocab[row[1]].index
                relations[item_1_index].add(item_2_index)
                items.update([item_1_index, item_2_index])
        self.items = items
        self.relations = relations
        self.embedding = embedding
    @staticmethod
    def get_positive_relation_ranks_and_avg_prec(all_distances, positive_relations):
        """Compute ranks and Average Precision of positive relations.
        Parameters
        ----------
        all_distances : numpy.array of float
            Array of all distances (floats) for a specific item.
        positive_relations : list
            List of indices of positive relations for the item.
        Returns
        -------
        (list of int, float)
            The list contains ranks of positive relations in the same order as `positive_relations`.
            The float is the Average Precision of the ranking, e.g. ([1, 2, 3, 20], 0.610).
        """
        positive_relation_distances = all_distances[positive_relations]
        # Mask out the positives so each rank is computed against negatives only.
        negative_relation_distances = np.ma.array(all_distances, mask=False)
        negative_relation_distances.mask[positive_relations] = True
        # Compute how many negative relation distances are less than each positive relation distance, plus 1 for rank
        ranks = (negative_relation_distances < positive_relation_distances[:, np.newaxis]).sum(axis=1) + 1
        # For MAP, each sorted rank is shifted by the number of positives ranked ahead of it.
        map_ranks = np.sort(ranks) + np.arange(len(ranks))
        # Average Precision: mean over positives of (i / rank_i) on the adjusted ranks.
        avg_precision = ((np.arange(1, len(map_ranks) + 1) / np.sort(map_ranks)).mean())
        return list(ranks), avg_precision
    def evaluate(self, max_n=None):
        """Evaluate all defined metrics for the reconstruction task.
        Parameters
        ----------
        max_n : int, optional
            Maximum number of positive relations to evaluate, all if `max_n` is None.
        Returns
        -------
        dict of (str, float)
            (metric_name, metric_value) pairs, e.g. {'mean_rank': 50.3, 'MAP': 0.31}.
        """
        mean_rank, map_ = self.evaluate_mean_rank_and_map(max_n)
        return {'mean_rank': mean_rank, 'MAP': map_}
    def evaluate_mean_rank_and_map(self, max_n=None):
        """Evaluate mean rank and MAP for reconstruction.
        Parameters
        ----------
        max_n : int, optional
            Maximum number of positive relations to evaluate, all if `max_n` is None.
        Returns
        -------
        (float, float)
            (mean_rank, MAP), e.g (50.3, 0.31).
        """
        ranks = []
        avg_precision_scores = []
        for i, item in enumerate(self.items, start=1):
            # Items that never appear as the left-hand node have nothing to rank.
            if item not in self.relations:
                continue
            item_relations = list(self.relations[item])
            item_term = self.embedding.index2word[item]
            item_distances = self.embedding.distances(item_term)
            positive_relation_ranks, avg_precision = \
                self.get_positive_relation_ranks_and_avg_prec(item_distances, item_relations)
            ranks += positive_relation_ranks
            avg_precision_scores.append(avg_precision)
            if max_n is not None and i > max_n:
                break
        return np.mean(ranks), np.mean(avg_precision_scores)
class LinkPredictionEvaluation(object):
    """Evaluate reconstruction on given network for given embedding."""
    def __init__(self, train_path, test_path, embedding):
        """Initialize evaluation instance with tsv file containing relation pairs and embedding to be evaluated.
        Parameters
        ----------
        train_path : str
            Path to tsv file containing relation pairs used for training.
        test_path : str
            Path to tsv file containing relation pairs to evaluate.
        embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
            Embedding to be evaluated.
        """
        items = set()
        embedding_vocab = embedding.vocab
        # 'known' pairs come from the training file, 'unknown' pairs from the test file.
        relations = {'known': defaultdict(set), 'unknown': defaultdict(set)}
        data_files = {'known': train_path, 'unknown': test_path}
        for relation_type, data_file in data_files.items():
            with utils.open(data_file, 'r') as f:
                reader = csv.reader(f, delimiter='\t')
                for row in reader:
                    assert len(row) == 2, 'Hypernym pair has more than two items'
                    item_1_index = embedding_vocab[row[0]].index
                    item_2_index = embedding_vocab[row[1]].index
                    relations[relation_type][item_1_index].add(item_2_index)
                    items.update([item_1_index, item_2_index])
        self.items = items
        self.relations = relations
        self.embedding = embedding
    @staticmethod
    def get_unknown_relation_ranks_and_avg_prec(all_distances, unknown_relations, known_relations):
        """Compute ranks and Average Precision of unknown positive relations.
        Parameters
        ----------
        all_distances : numpy.array of float
            Array of all distances for a specific item.
        unknown_relations : list of int
            List of indices of unknown positive relations.
        known_relations : list of int
            List of indices of known positive relations.
        Returns
        -------
        tuple (list of int, float)
            The list contains ranks of positive relations in the same order as `positive_relations`.
            The float is the Average Precision of the ranking, e.g. ([1, 2, 3, 20], 0.610).
        """
        unknown_relation_distances = all_distances[unknown_relations]
        # Mask out both unknown and known positives: ranks are computed against true negatives only.
        negative_relation_distances = np.ma.array(all_distances, mask=False)
        negative_relation_distances.mask[unknown_relations] = True
        negative_relation_distances.mask[known_relations] = True
        # Compute how many negative relation distances are less than each unknown relation distance, plus 1 for rank
        ranks = (negative_relation_distances < unknown_relation_distances[:, np.newaxis]).sum(axis=1) + 1
        # For MAP, each sorted rank is shifted by the number of positives ranked ahead of it.
        map_ranks = np.sort(ranks) + np.arange(len(ranks))
        avg_precision = ((np.arange(1, len(map_ranks) + 1) / np.sort(map_ranks)).mean())
        return list(ranks), avg_precision
    def evaluate(self, max_n=None):
        """Evaluate all defined metrics for the link prediction task.
        Parameters
        ----------
        max_n : int, optional
            Maximum number of positive relations to evaluate, all if `max_n` is None.
        Returns
        -------
        dict of (str, float)
            (metric_name, metric_value) pairs, e.g. {'mean_rank': 50.3, 'MAP': 0.31}.
        """
        mean_rank, map_ = self.evaluate_mean_rank_and_map(max_n)
        return {'mean_rank': mean_rank, 'MAP': map_}
    def evaluate_mean_rank_and_map(self, max_n=None):
        """Evaluate mean rank and MAP for link prediction.
        Parameters
        ----------
        max_n : int, optional
            Maximum number of positive relations to evaluate, all if `max_n` is None.
        Returns
        -------
        tuple (float, float)
            (mean_rank, MAP), e.g (50.3, 0.31).
        """
        ranks = []
        avg_precision_scores = []
        for i, item in enumerate(self.items, start=1):
            if item not in self.relations['unknown']:  # No positive relations to predict for this node
                continue
            unknown_relations = list(self.relations['unknown'][item])
            known_relations = list(self.relations['known'][item])
            item_term = self.embedding.index2word[item]
            item_distances = self.embedding.distances(item_term)
            unknown_relation_ranks, avg_precision = \
                self.get_unknown_relation_ranks_and_avg_prec(item_distances, unknown_relations, known_relations)
            ranks += unknown_relation_ranks
            avg_precision_scores.append(avg_precision)
            if max_n is not None and i > max_n:
                break
        return np.mean(ranks), np.mean(avg_precision_scores)
class LexicalEntailmentEvaluation(object):
    """Evaluate reconstruction on given network for any embedding."""
    def __init__(self, filepath):
        """Initialize evaluation instance with HyperLex text file containing relation pairs.
        Parameters
        ----------
        filepath : str
            Path to HyperLex text file.
        """
        # Maps (word_1, word_2) -> gold entailment score read from the AVG_SCORE column.
        expected_scores = {}
        with utils.open(filepath, 'r') as f:
            reader = csv.DictReader(f, delimiter=' ')
            for row in reader:
                word_1, word_2 = row['WORD1'], row['WORD2']
                expected_scores[(word_1, word_2)] = float(row['AVG_SCORE'])
        self.scores = expected_scores
        # Weight of the norm-difference term in score_function.
        self.alpha = 1000
    def score_function(self, embedding, trie, term_1, term_2):
        """Compute predicted score - extent to which `term_1` is a type of `term_2`.
        Parameters
        ----------
        embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
            Embedding to use for computing predicted score.
        trie : :class:`pygtrie.Trie`
            Trie to use for finding matching vocab terms for input terms.
        term_1 : str
            Input term.
        term_2 : str
            Input term.
        Returns
        -------
        float
            Predicted score (the extent to which `term_1` is a type of `term_2`).
        """
        try:
            word_1_terms = self.find_matching_terms(trie, term_1)
            word_2_terms = self.find_matching_terms(trie, term_2)
        except KeyError:
            raise ValueError("No matching terms found for either %s or %s" % (term_1, term_2))
        # Pick the closest pair of matching vocab terms for the two input words.
        # NOTE(review): the loop variables below shadow the `term_1`/`term_2`
        # parameters; rename with care if refactoring.
        min_distance = np.inf
        min_term_1, min_term_2 = None, None
        for term_1 in word_1_terms:
            for term_2 in word_2_terms:
                distance = embedding.distance(term_1, term_2)
                if distance < min_distance:
                    min_term_1, min_term_2 = term_1, term_2
                    min_distance = distance
        assert min_term_1 is not None and min_term_2 is not None
        vector_1, vector_2 = embedding.word_vec(min_term_1), embedding.word_vec(min_term_2)
        norm_1, norm_2 = np.linalg.norm(vector_1), np.linalg.norm(vector_2)
        # Score = -(1 + alpha * (norm_2 - norm_1)) * distance: smaller distance and a
        # hierarchically higher term_1 (smaller norm) both push the score up.
        return -1 * (1 + self.alpha * (norm_2 - norm_1)) * min_distance
    @staticmethod
    def find_matching_terms(trie, word):
        """Find terms in the `trie` beginning with the `word`.
        Parameters
        ----------
        trie : :class:`pygtrie.Trie`
            Trie to use for finding matching terms.
        word : str
            Input word to use for prefix search.
        Returns
        -------
        list of str
            List of matching terms.
        """
        # '%s.' matches vocab keys like 'word.n.01' derived from the plain word.
        matches = trie.items('%s.' % word)
        matching_terms = [''.join(key_chars) for key_chars, value in matches]
        return matching_terms
    @staticmethod
    def create_vocab_trie(embedding):
        """Create trie with vocab terms of the given embedding to enable quick prefix searches.
        Parameters
        ----------
        embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
            Embedding for which trie is to be created.
        Returns
        -------
        :class:`pygtrie.Trie`
            Trie containing vocab terms of the input embedding.
        """
        try:
            from pygtrie import Trie
        except ImportError:
            raise ImportError(
                'pygtrie could not be imported, please install pygtrie in order to use LexicalEntailmentEvaluation')
        vocab_trie = Trie()
        for key in embedding.vocab:
            vocab_trie[key] = True
        return vocab_trie
    def evaluate_spearman(self, embedding):
        """Evaluate spearman scores for lexical entailment for given embedding.
        Parameters
        ----------
        embedding : :class:`~gensim.models.poincare.PoincareKeyedVectors`
            Embedding for which evaluation is to be done.
        Returns
        -------
        float
            Spearman correlation score for the task for input embedding.
        """
        predicted_scores = []
        expected_scores = []
        skipped = 0
        count = 0
        vocab_trie = self.create_vocab_trie(embedding)
        for (word_1, word_2), expected_score in self.scores.items():
            try:
                # Pairs with no vocab match are skipped rather than failing the whole run.
                predicted_score = self.score_function(embedding, vocab_trie, word_1, word_2)
            except ValueError:
                skipped += 1
                continue
            count += 1
            predicted_scores.append(predicted_score)
            expected_scores.append(expected_score)
        logger.info('skipped pairs: %d out of %d' % (skipped, len(self.scores)))
        spearman = spearmanr(expected_scores, predicted_scores)
        return spearman.correlation
| napsternxg/gensim | gensim/models/poincare.py | Python | gpl-3.0 | 75,098 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 David García Goñi
#
# This file is part of Phatty.
#
# Phatty is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Phatty is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Phatty. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import phatty
import mido
from mido import Message
import mock
from mock import Mock
from mock import call
from phatty.connector import Connector
from struct import unpack
# SysEx fixture files used by the tests below, resolved relative to this module.
BAD_BANK_FILE_NAME = os.path.join(
    os.path.dirname(__file__), 'resources/preset.syx')
BANK_FILE_NAME = os.path.join(os.path.dirname(__file__), 'resources/bank.syx')
BULK_FILE_NAME = os.path.join(os.path.dirname(__file__), 'resources/bulk.syx')
class Test(unittest.TestCase):
    """Unit tests for phatty.connector.Connector with the MIDI port mocked out."""

    def setUp(self):
        # Fresh connector with a mocked MIDI port for every test.
        self.connector = Connector()
        self.connector.port = Mock()

    def test_get_panel_as_preset(self):
        def return_value():
            return [i for i in range(0, 192)]
        self.connector.get_panel = Mock(side_effect=return_value)
        value = self.connector.get_panel_as_preset(37)
        self.connector.get_panel.assert_called_once()
        self.assertEqual(value[2], 0x5)
        self.assertEqual(value[4], 37)

    def test_get_panel(self):
        def return_value():
            return [i for i in range(0, 192)]
        self.connector.tx_message = Mock()
        self.connector.rx_message = Mock(side_effect=return_value)
        value = self.connector.get_panel()
        self.connector.tx_message.assert_called_once_with(
            phatty.connector.REQUEST_PANEL)
        self.connector.rx_message.assert_called_once()
        self.assertEqual(value, return_value())

    def test_get_preset(self):
        def return_value():
            return [i for i in range(0, 192)]
        self.connector.tx_message = Mock()
        self.connector.rx_message = Mock(side_effect=return_value)
        value = self.connector.get_preset(37)
        msg = []
        msg.extend(phatty.connector.REQUEST_PATCH)
        msg[phatty.connector.REQ_PATCH_BYTE] = 37
        self.connector.tx_message.assert_called_once_with(msg)
        self.connector.rx_message.assert_called_once()
        self.assertEqual(value, return_value())

    def test_set_preset(self):
        self.connector.port.send = Mock()
        self.connector.set_preset(37)
        msg = Message('program_change', channel=0, program=37)
        self.connector.port.send.assert_called_once_with(msg)

    def test_set_bulk(self):
        try:
            data = []
            data.extend(phatty.connector.BULK_START)
            data.extend([0] * (phatty.connector.BULK_SIZE -
                               len(phatty.connector.BULK_START)))
            self.connector.tx_message = Mock()
            self.connector.set_bulk(data)
            self.connector.tx_message.assert_called_once_with(data)
        except ValueError:
            self.assertTrue(False)

    def test_set_bulk_red(self):
        try:
            data = []
            data.extend(phatty.connector.BULK_START)
            data.extend([0] * (phatty.connector.RED_BULK_SIZE -
                               len(phatty.connector.BULK_START)))
            self.connector.tx_message = Mock()
            self.connector.set_bulk(data)
            self.connector.tx_message.assert_called_once_with(data)
        except ValueError:
            self.assertTrue(False)

    def test_set_bulk_fail(self):
        try:
            data = []
            self.connector.set_bulk(data)
            self.assertTrue(False)
        except ValueError as e:
            self.assertTrue(str(e) == phatty.connector.INVALID_BULK_FILE)

    def test_set_bank(self):
        try:
            data = []
            data.extend(phatty.connector.BANK_START)
            data.extend([0] * (phatty.connector.BANK_SIZE -
                               len(phatty.connector.BANK_START)))
            self.connector.tx_message = Mock()
            self.connector.set_bank(data)
            self.connector.tx_message.assert_called_once_with(data)
        except ValueError:
            self.assertTrue(False)

    def test_set_bank_red(self):
        try:
            data = []
            data.extend(phatty.connector.BANK_START)
            data.extend([0] * (phatty.connector.RED_BANK_SIZE -
                               len(phatty.connector.BANK_START)))
            self.connector.tx_message = Mock()
            self.connector.set_bank(data)
            self.connector.tx_message.assert_called_once_with(data)
        except ValueError:
            self.assertTrue(False)

    def test_set_bank_fail(self):
        try:
            data = []
            self.connector.set_bank(data)
            self.assertTrue(False)
        except ValueError as e:
            self.assertTrue(str(e) == phatty.connector.INVALID_BANK_FILE)

    def set_bank_from_file(self, filename):
        """Helper: call set_bank_from_file and return the payload bytes of the file."""
        data = mido.read_syx_file(filename)[0].bytes()
        # Strip the SysEx start (0xF0) and end (0xF7) framing bytes.
        data = data[1:len(data) - 1]
        self.connector.set_bank_from_file(filename)
        return data

    def test_set_bank_from_bank_file(self):
        self.connector.set_bank = Mock()
        data = self.set_bank_from_file(BANK_FILE_NAME)
        self.connector.set_bank.assert_called_once_with(data)

    def test_set_bank_from_bulk_file(self):
        # set_bank rejects the bulk payload, so the connector falls back to set_bulk.
        self.connector.set_bank = Mock(side_effect=ValueError)
        self.connector.set_bulk = Mock()
        data = self.set_bank_from_file(BULK_FILE_NAME)
        self.connector.set_bank.assert_called_once_with(data)
        self.connector.set_bulk.assert_called_once_with(data)

    def test_set_bank_from_bank_file_error(self):
        try:
            self.connector.set_bank = Mock(side_effect=ValueError)
            self.connector.set_bank_from_file(BAD_BANK_FILE_NAME)
            self.assertTrue(False)
        except ValueError:
            self.assertTrue(True)

    def test_write_data_to_file(self):
        data = [1, 2, 3]
        filename = 'foo'
        messages = [Message('sysex', data=data)]
        mido.write_syx_file = Mock()
        self.connector.write_data_to_file(filename, data)
        mido.write_syx_file.assert_called_once_with(filename, messages)

    def return_sysex(filename):
        """side_effect for the patched mido.read_syx_file below."""
        data = [1, 2, 3]
        return [Message('sysex', data=data)]

    @mock.patch('mido.read_syx_file', side_effect=return_sysex)
    def test_read_data_from_file(self, mock_read):
        filename = 'foo'
        data = self.connector.read_data_from_file(filename)
        mido.read_syx_file.assert_called_once_with(filename)
        self.assertEqual(data, [1, 2, 3])

    def test_set_panel_name(self):
        name = 'ABCabc123'
        calls = []
        calls.append(call(
            Message('control_change', channel=0, control=119, value=0)))
        calls.append(call(
            Message('control_change', channel=0, control=66, value=19)))
        calls.append(call(
            Message('control_change', channel=0, control=66, value=15)))
        calls.append(call(
            Message('control_change', channel=0, control=66, value=13)))
        calls.append(call(
            Message('control_change', channel=0, control=66, value=1)))
        for c in name:
            calls.append(call(
                Message('control_change', channel=0, control=66, value=ord(c))))
        self.connector.port.send = Mock()
        self.connector.set_panel_name(name)
        self.connector.port.send.assert_has_calls(calls, any_order=False)

    def check_send_message(self, function, control, array):
        """Helper: assert `function(i)` sends CC `control` with `array[i]` for every index."""
        for i in range(0, len(array)):
            message = Message('control_change', channel=0,
                              control=control, value=array[i])
            self.connector.port.send = Mock()
            function(i)
            self.connector.port.send.assert_called_once_with(message)

    def test_set_lfo_midi_sync(self):
        self.check_send_message(
            self.connector.set_lfo_midi_sync, 102, phatty.connector.LFO_MIDI_SYNC_VALUES)

    def test_set_panel_filter_poles(self):
        self.check_send_message(
            self.connector.set_panel_filter_poles, 109, phatty.connector.FILTER_POLES_VALUES)

    def test_set_panel_vel_to_filter(self):
        self.check_send_message(
            self.connector.set_panel_vel_to_filter, 110, phatty.connector.VEL_TO_FILTER_VALUES)

    def test_set_panel_vel_to_amp(self):
        self.check_send_message(
            self.connector.set_panel_vel_to_amp, 92, phatty.connector.VEL_TO_AMP_VALUES)

    def test_set_panel_release(self):
        self.check_send_message(
            self.connector.set_panel_release, 88, phatty.connector.RELEASE_VALUES)

    def test_set_panel_scale(self):
        self.check_send_message(
            self.connector.set_panel_scale, 113, phatty.connector.SCALE_VALUES)

    def test_set_panel_pw_up_amount(self):
        self.check_send_message(
            self.connector.set_panel_pw_up_amount, 107, phatty.connector.PW_VALUES)

    def test_set_panel_pw_down_amount(self):
        self.check_send_message(
            self.connector.set_panel_pw_down_amount, 108, phatty.connector.PW_VALUES)

    def test_set_panel_legato(self):
        self.check_send_message(
            self.connector.set_panel_legato, 112, phatty.connector.LEGATO_VALUES)

    def test_set_panel_keyboard_priority(self):
        self.check_send_message(
            self.connector.set_panel_keyboard_priority, 111, phatty.connector.KEYBOARD_PRIORITY_VALUES)

    def test_set_panel_glide_on_legato(self):
        self.check_send_message(
            self.connector.set_panel_glide_on_legato, 94, phatty.connector.RELEASE_VALUES)

    def test_set_panel_mod_source_5(self):
        self.check_send_message(
            self.connector.set_panel_mod_source_5, 104, phatty.connector.MOD_SRC_5_VALUES)

    def test_set_panel_mod_source_6(self):
        self.check_send_message(
            self.connector.set_panel_mod_source_6, 105, phatty.connector.MOD_SRC_6_VALUES)

    def test_set_panel_mod_dest_2(self):
        self.check_send_message(
            self.connector.set_panel_mod_dest_2, 106, phatty.connector.MOD_DEST_2_VALUES)

    def test_set_panel_lfo_key_retrigger(self):
        self.check_send_message(
            self.connector.set_panel_lfo_key_retrigger, 93, phatty.connector.LFO_RETRIGGER_VALUES)

    def test_set_panel_arp_pattern(self):
        # Bug fix: this method was named `test_set_panel_arp_octaves`, the same
        # name as the real octaves test below, so it was silently shadowed and
        # never ran. Renamed to match the feature it actually exercises.
        self.check_send_message(
            self.connector.set_panel_arp_pattern, 117, phatty.connector.ARP_PATTERN_VALUES)

    def test_set_panel_arp_mode(self):
        self.check_send_message(
            self.connector.set_panel_arp_mode, 118, phatty.connector.ARP_MODE_VALUES)

    def test_set_panel_arp_octaves(self):
        self.check_send_message(
            self.connector.set_panel_arp_octaves, 116, phatty.connector.ARP_OCTAVES_VALUES)

    def test_set_panel_arp_gate(self):
        self.check_send_message(
            self.connector.set_panel_arp_gate, 95, phatty.connector.ARP_GATE_VALUES)

    def test_set_panel_arp_clock_source(self):
        self.check_send_message(
            self.connector.set_panel_arp_clock_source, 114, phatty.connector.ARP_CLOCK_SOURCE_VALUES)

    def test_set_panel_arp_clock_division(self):
        self.check_send_message(
            self.connector.set_panel_arp_clock_division, 115, phatty.connector.ARP_CLOCK_DIVISION_VALUES)
| dagargo/phatty | tests/test_connector.py | Python | gpl-3.0 | 11,804 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021-2022 Daniel Estevez <daniel@destevez.net>
#
# This file is part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
from gnuradio import gr, digital
import pmt
from ...hier.sync_to_pdu_packed import sync_to_pdu_packed
from ...hdlc_deframer import hdlc_crc_check
# HDLC 0x7e flag (bit pattern 01111110), used as the deframer sync word below.
_syncword = '01111110'
class crop_and_check_crc(gr.basic_block):
    """
    Helper block to crop using the final 0x7e flag and check CRC-16

    For each incoming PDU, every 0x7e byte is tried as the end-of-frame
    flag; the first candidate whose preceding bytes pass the HDLC CRC-16
    check is published (with the 2-byte FCS stripped).
    """
    def __init__(self):
        gr.basic_block.__init__(
            self,
            name='crop_and_check_crc',
            in_sig=[],
            out_sig=[])
        self.crc_check = hdlc_crc_check()
        self.message_port_register_in(pmt.intern('in'))
        self.set_msg_handler(pmt.intern('in'), self.handle_msg)
        self.message_port_register_out(pmt.intern('out'))

    def handle_msg(self, msg_pmt):
        """Scan the PDU for 0x7e flags and emit the first CRC-valid crop."""
        msg = pmt.cdr(msg_pmt)
        if not pmt.is_u8vector(msg):
            print('[ERROR] Received invalid message type. Expected u8vector')
            return
        packet = pmt.u8vector_elements(msg)
        start = 0
        while True:
            try:
                idx = packet[start:].index(0x7e)
            except ValueError:
                # No more flags: no valid frame in this PDU.
                return
            start += idx + 1
            # Bug fix: `idx` is relative to `packet[start:]`, so the absolute
            # flag position is `start - 1` (after the update above). The old
            # `packet[:idx]` mis-cropped every candidate after the first flag.
            p = packet[:start - 1]
            if self.crc_check.fcs_ok(p):
                p = p[:-2]  # drop the 2-byte FCS
                self.message_port_pub(
                    pmt.intern('out'),
                    pmt.cons(pmt.PMT_NIL, pmt.init_u8vector(len(p), p)))
                return
class yusat_deframer(gr.hier_block2):
    """
    Hierarchical block to deframe YUSAT ad-hoc AX.25-like protocol
    The input is a float stream of soft symbols. The output are PDUs
    with YUSAT frames.
    Args:
        options: Options from argparse
    """
    def __init__(self, options=None):
        gr.hier_block2.__init__(
            self,
            'yusat_deframer',
            gr.io_signature(1, 1, gr.sizeof_float),
            gr.io_signature(0, 0, 0))
        self.message_port_register_hier_out('out')
        # Hard-decision slicer: float soft symbols -> bits.
        self.slicer = digital.binary_slicer_fb()
        # We hope that 256 bytes is long enough to contain the full packet
        self.deframer = sync_to_pdu_packed(
            packlen=256, sync=_syncword, threshold=0)
        # Crops each candidate PDU at a closing 0x7e flag and verifies the CRC-16.
        self.crop = crop_and_check_crc()
        # Stream path: input -> slicer -> deframer; message path: deframer -> crop -> out.
        self.connect(self, self.slicer, self.deframer)
        self.msg_connect((self.deframer, 'out'), (self.crop, 'in'))
        self.msg_connect((self.crop, 'out'), (self, 'out'))
| daniestevez/gr-satellites | python/components/deframers/yusat_deframer.py | Python | gpl-3.0 | 2,570 |
from piston.handler import BaseHandler
from piston.utils import rc
from telemaco.models import User
from django.db.utils import IntegrityError
class RegistrationHandler(BaseHandler):
class _UserProxy(User): pass
allowed_methods = ('POST')
model = _UserProxy
def create(self, request):
try:
# Sing-up
print "Create user:", request.data
obj = User(username=request.data['username'])
obj.set_password(request.data['password'])
obj.save()
return rc.CREATED
except IntegrityError, e:
print e
return rc.DUPLICATE_ENTRY
except Exception, e:
print e
return rc.BAD_REQUEST
| diegomartin/Telemaco | TelemacoServer/src/api/RegistrationHandler.py | Python | gpl-3.0 | 789 |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.template import loader
from django.views import generic
from .scripts.vagrant_boxes import _box_list, _global_status, _deps_versions
class IndexView(generic.ListView):
    """Render the manager dashboard: tool versions, Vagrant boxes and environments."""
    template_name = 'manager/index.html'

    def get(self, request):
        # Keep the original call order: versions, then boxes, then environments.
        tool_versions = _deps_versions()
        boxes = _box_list()
        environments = _global_status()
        context = {
            'all_boxes': boxes,
            'all_envs': environments,
            'versions': tool_versions,
        }
        return render(request, self.template_name, context)
| speedlight/roombox | manager/views.py | Python | gpl-3.0 | 554 |
from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_K, htr_to_meV, htr_to_eV
import argparser
import norm_k
import numpy as np
import scf
import system
# Parse command-line options (args exposes thres, temp, dft, vb, energy, method).
args = argparser.read_argument('Evaluate step-like feature in electron-phonon coupling')
# Convert user-facing units (meV, K) to Hartree atomic units.
thres = args.thres / htr_to_meV
beta = htr_to_K / args.temp
# Build the model system and attach Bose-Einstein occupation factors for its phonon frequency.
Sigma = system.make_data(args.dft, args.vb)
Sigma.bose_einstein = bose_einstein(Sigma.freq, beta)
# Scan energies in 0.5 meV steps, solving the self-energy self-consistently at each point.
for energy_meV in np.arange(0.0, args.energy, 0.5):
    energy = energy_meV / htr_to_meV
    kk = norm_k.eval(Sigma.eff_mass, energy)
    # Small imaginary seed for the self-consistency loop.
    Sigma_in = 1e-3j / htr_to_meV
    Sigma_out, it = scf.self_energy(args.method, thres, Sigma, kk, Sigma_in)
    # Valence-band energies are reported with opposite sign.
    if args.vb: real_energy = -energy
    else: real_energy = energy
    print(real_energy * htr_to_meV, -Sigma_out.imag * htr_to_meV, it)
| mmdg-oxford/papers | Schlipf-PRL-2018/model/step.py | Python | gpl-3.0 | 830 |
from django.conf.urls import url
from . import views
# URLconf for the catalog app: index, book/author CRUD and loan listings.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # Book listing, detail and CRUD views.
    url(r'^books/$', views.BookListView.as_view(), name='books'),
    url(r'^books/(?P<pk>\d+)$', views.BookDetailView.as_view(), name='book-detail'),
    # NOTE(review): renew accepts [-\w]+ (slug-like pk) while the other book
    # routes use \d+ -- confirm which key type renew_book_librarian expects.
    url(r'^books/(?P<pk>[-\w]+)/renew/$', views.renew_book_librarian, name='renew-book-librarian'),
    url(r'^books/create/$', views.BookCreate.as_view(), name='book-create'),
    url(r'^books/(?P<pk>\d+)/update/$', views.BookUpdate.as_view(), name='book-update'),
    url(r'^books/(?P<pk>\d+)/delete/$', views.BookDelete.as_view(), name='book-delete'),
    # Author listing, detail and CRUD views.
    url(r'^authors/$', views.AuthorListView.as_view(), name='authors'),
    url(r'^authors/(?P<pk>\d+)$', views.AuthorDetailView.as_view(), name='author-detail'),
    url(r'^authors/create/$', views.AuthorCreate.as_view(), name='author-create'),
    url(r'^authors/(?P<pk>\d+)/update/$', views.AuthorUpdate.as_view(), name='author-update'),
    url(r'^authors/(?P<pk>\d+)/delete/$', views.AuthorDelete.as_view(), name='author-delete'),
    # Loan views: the current user's loans and all borrowed books (staff).
    url(r'^mybooks/$', views.LoanedBooksByUserListView.as_view(), name='my-borrowed'),
    url(r'^borrowed/$', views.BorrowedBooksListView.as_view(), name='books-borrowed'),
]
| tridc/django_local_library | catalog/urls.py | Python | gpl-3.0 | 1,232 |
#coding: latin1

# Human-readable label for this demo application (German).
label = 'Prototyp einer Homepage für den V.O.R.'

def populate(db):
    """Fill *db* with demo content by delegating to the vor1 module."""
    import vor1
    vor1.populate(db)
| MaxTyutyunnikov/lino | obsolete/demo/vor/__init__.py | Python | gpl-3.0 | 118 |
from MakeGraph import MakeGraph
from Moving_pacman import PacMan
import pygame
class Ghost(MakeGraph):
    """A ghost that wanders the maze graph and chases Pac-Man.

    ``self.cords`` stores the pixel position ({'x', 'y'}); grid cells are
    (row, col) tuples obtained by integer-dividing pixels by the 23-pixel
    tile size.
    """

    # Class-wide counter; incremented once per instance so each ghost gets a
    # distinct 1-based index used to pick its sprite colour.
    index = 0

    def __init__(self, class_graph, x, y):
        Ghost.index = Ghost.index + 1
        self.all_nodes = class_graph.get_nodes()                   # graph vertices (junctions)
        self.paths_to_all_nodes = class_graph.get_shortest_path()  # all-pairs shortest paths
        self.path = []          # queued (row, col) steps still to walk
        self.hunting = False    # True while actively chasing Pac-Man
        # Default sprite set (red); get_pictures() overrides per index.
        self.name_image_u = "Ghost_red_up"
        self.name_image_d = "Ghost_red_down"
        self.name_image_l = "Ghost_red_left"
        self.name_image_r = "Ghost_red_right"
        self.name_image = self.name_image_u
        self.cords = {'x': x, 'y': y}
        self.index = Ghost.index

    def next_hop(self):
        """Return the next planned step, or [] when no path is queued."""
        if self.path:
            return self.path[0]
        return []

    def find_ghost_cords(self):
        """Return this ghost's (row, col) grid cell."""
        ghost_x = int(self.cords['y'] / 23)
        ghost_y = int(self.cords['x'] / 23)
        return (ghost_x, ghost_y)

    def get_pictures(self):
        """Assign the sprite names matching this ghost's creation index.

        Bug fix: ``__init__`` increments the class counter *before* copying
        it, so instances are numbered 1..4.  The original tested 0..3,
        which left the first branch dead and gave the fourth ghost no
        colour of its own; the checks are shifted to 1..4.
        """
        if self.index == 1:
            self.name_image_u = "Ghost_red_up"
            self.name_image_d = "Ghost_red_down"
            self.name_image_l = "Ghost_red_left"
            self.name_image_r = "Ghost_red_right"
        if self.index == 2:
            self.name_image_u = "Ghost_orange_up"
            self.name_image_d = "Ghost_orange_down"
            self.name_image_l = "Ghost_orange_left"
            self.name_image_r = "Ghost_orange_right"
        if self.index == 3:
            self.name_image_u = "Ghost_pink_up"
            self.name_image_d = "Ghost_pink_down"
            self.name_image_l = "Ghost_pink_left"
            self.name_image_r = "Ghost_pink_right"
        if self.index == 4:
            self.name_image_u = "Ghost_cyan_up"
            self.name_image_d = "Ghost_cyan_down"
            self.name_image_l = "Ghost_cyan_left"
            self.name_image_r = "Ghost_cyan_right"

    def find_closest_nodes(self):
        """BFS outward from the current cell and return every nearest graph
        node as a (col, row) tuple (note the axis swap)."""
        closest_nodes = []
        ghost_x = int(self.cords['x'] / 23)
        ghost_y = int(self.cords['y'] / 23)
        vertex = (ghost_y, ghost_x)
        queue = [vertex]
        visited = [vertex]
        while queue != []:
            new_v = queue.pop(0)
            new_v_adj = [(new_v[0] - 1, new_v[1]),
                         (new_v[0] + 1, new_v[1]),
                         (new_v[0], new_v[1] - 1),
                         (new_v[0], new_v[1] + 1)]
            for v_adj in new_v_adj:
                if self.is_p_vertex(v_adj) and v_adj not in visited:
                    if v_adj in self.all_nodes:
                        # Stop expanding through graph nodes; record them.
                        closest_nodes.append((v_adj[1], v_adj[0]))
                    else:
                        queue.append(v_adj)
                    visited.append(v_adj)
        return closest_nodes

    def find_closest_vertex(self):
        """Return the cell-by-cell path to the nearest graph vertex, or []
        when already standing on one.

        NOTE(review): if the BFS exhausts without reaching a node the
        method implicitly returns None; callers compare against [] --
        confirm this cannot happen on a well-formed maze.
        """
        ghost_x = int(self.cords['x'] / 23)
        ghost_y = int(self.cords['y'] / 23)
        vertex = (ghost_y, ghost_x)
        queue = [vertex]
        map_to_a_vertex = {}  # child cell -> parent cell, for path rebuild
        visited_n = [vertex]
        if vertex in self.all_nodes:
            return []
        while queue != []:
            new_v = queue.pop(0)
            new_v_adj = [(new_v[0] - 1, new_v[1]),
                         (new_v[0] + 1, new_v[1]),
                         (new_v[0], new_v[1] - 1),
                         (new_v[0], new_v[1] + 1)]
            for v_adj in new_v_adj:
                map_to_a_vertex[v_adj] = new_v
                if v_adj in self.all_nodes:
                    # Walk the parent links back to the start cell.
                    full_path = [v_adj]
                    while map_to_a_vertex[v_adj] != vertex:
                        v_adj = map_to_a_vertex[v_adj]
                        full_path.insert(0, v_adj)
                    return full_path
                if MakeGraph.is_p_vertex(self, v_adj) and v_adj not in visited_n:
                    queue.append(v_adj)
                    visited_n.append(v_adj)

    def ghost_move(self, pacman_vertex, pacman_cords):
        """(Re)plan this ghost's path towards Pac-Man.

        Switches to hunting mode when standing on Pac-Man's vertex;
        otherwise walks to the nearest graph node, then follows the
        precomputed shortest path (each hop doubled to slow the ghost).
        """
        my_cords = (int(self.cords['y'] / 23), int(self.cords['x'] / 23))
        if my_cords == pacman_vertex:
            self.hunting = True
        if self.hunting == True:
            self.path = self.search_eat(pacman_cords)
        if not self.path:
            if self.hunting == True:
                self.hunting = False
            if self.find_closest_vertex() != []:
                self.path = self.find_closest_vertex()
            else:
                for i in self.paths_to_all_nodes[my_cords][pacman_vertex]:
                    self.path.extend(2 * [i])

    def ghost_make_move(self):
        """Pop the next queued step, face the sprite accordingly and update
        the pixel position."""
        new_step = self.path.pop(0)
        # Bug fix: the original computed int(self.cords['x'])/23, producing a
        # float column that broke the direction comparisons below; divide
        # before truncating, as done everywhere else in the class.
        old_step = (int(self.cords['y'] / 23), int(self.cords['x'] / 23))
        if old_step[0] == new_step[0] and old_step[1] < new_step[1]:
            self.name_image = self.name_image_r
        if old_step[0] == new_step[0] and old_step[1] > new_step[1]:
            self.name_image = self.name_image_l
        if old_step[0] < new_step[0] and old_step[1] == new_step[1]:
            self.name_image = self.name_image_d
        if old_step[0] > new_step[0] and old_step[1] == new_step[1]:
            self.name_image = self.name_image_u
        self.cords['y'] = new_step[0] * 23
        self.cords['x'] = new_step[1] * 23

    def search_eat(self, pacman_cords):
        """BFS towards Pac-Man's cell; return the first step of the path as
        a one-element list, or [] when unreachable or already there."""
        ghost_x = int(self.cords['x'] / 23)
        ghost_y = int(self.cords['y'] / 23)
        vertex = (ghost_y, ghost_x)
        queue = [vertex]
        map_to_a_vertex = {}
        visited_n = [vertex]
        if vertex == pacman_cords:
            return []
        while queue != []:
            new_v = queue.pop(0)
            new_v_adj = [(new_v[0] - 1, new_v[1]),
                         (new_v[0] + 1, new_v[1]),
                         (new_v[0], new_v[1] - 1),
                         (new_v[0], new_v[1] + 1)]
            for v_adj in new_v_adj:
                if self.is_p_vertex(v_adj) and v_adj not in visited_n:
                    queue.append(v_adj)
                    visited_n.append(v_adj)
                    map_to_a_vertex[v_adj] = new_v
                    if v_adj == pacman_cords:
                        # Walk back to the cell adjacent to the start.
                        while map_to_a_vertex[v_adj] != vertex:
                            v_adj = map_to_a_vertex[v_adj]
                        return [v_adj]
        return []

    def draw_ghost(self, screen):
        """Plan a move towards a fixed target and blit the sprite.

        NOTE(review): the target vertex/cell are hard-coded here, presumably
        leftover debug values -- confirm against the caller.
        """
        ghost = pygame.image.load("Ghosts/Ghost_cyan_down.png")
        # Bug fix: ghost_move() takes (pacman_vertex, pacman_cords); the
        # original also passed `screen`, which raised a TypeError at runtime.
        self.ghost_move((14, 13), (16, 14))
        screen.blit(ghost, (self.cords['x'], self.cords['y']))
| Yordan92/Pac-man-multiplayer | Ghosts.py | Python | gpl-3.0 | 5,718 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-23 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjusts field options (verbose names,
    max lengths, blank/null flags) on the certificate model."""

    dependencies = [
        ('django_ca', '0001_initial'),
    ]

    operations = [
        # Cap the common name at 64 characters and label it "CommonName".
        migrations.AlterField(
            model_name='certificate',
            name='cn',
            field=models.CharField(max_length=64, verbose_name='CommonName'),
        ),
        migrations.AlterField(
            model_name='certificate',
            name='csr',
            field=models.TextField(verbose_name='CSR'),
        ),
        migrations.AlterField(
            model_name='certificate',
            name='pub',
            field=models.TextField(verbose_name='Public key'),
        ),
        # Revocation metadata is optional until a certificate is revoked.
        migrations.AlterField(
            model_name='certificate',
            name='revoked_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Revoked on'),
        ),
        migrations.AlterField(
            model_name='certificate',
            name='revoked_reason',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='Reason for revokation'),
        ),
    ]
| fsinf/certificate-authority | ca/django_ca/migrations/0002_auto_20151223_1508.py | Python | gpl-3.0 | 1,224 |
""" Copyright 2012, July 31
Written by Pattarapol (Cheer) Iamngamsup
E-mail: IAM.PATTARAPOL@GMAIL.COM
Sum square difference
Problem 6
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of
the first ten natural numbers and the square of the sum is
3025 - 385 = 2640.
Find the difference between the sum of the squares of
the first one hundred natural numbers and the square of the sum.
"""
#################################################
# Importing libraries & modules
import datetime
#################################################
# Global variables
#################################################
# Functions
#################################################
# Classes
#################################################
# Main function
def main():
    """Solve Project Euler #6 for n = 100, print and return the answer.

    The difference between the square of the sum and the sum of the
    squares of the first 100 natural numbers.  Uses integer arithmetic
    throughout (the original ``/`` produced a float under Python 3).
    """
    n = 100
    # Square of the sum: (n * (n + 1) / 2) ** 2, kept integral with //.
    square_of_sum = (n * (n + 1) // 2) ** 2
    # Sum of the squares 1^2 + 2^2 + ... + n^2.
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    answer = square_of_sum - sum_of_squares
    print( 'answer = {0}'.format( answer ) )
    return answer
#################################################
# Main execution
if __name__ == '__main__':
    # Record wall-clock start time (UTC) before solving.
    run_started = datetime.datetime.utcnow()
    print( 'startingDateTime = {0} UTC'.format( run_started ) )

    # Run the solver.
    main()

    # Record end time and report the elapsed duration.
    run_finished = datetime.datetime.utcnow()
    print( 'endingdateTime = {0} UTC'.format( run_finished ) )
    print( 'deltaDateTime = {0}'.format( run_finished - run_started ) )
| pattarapol-iamngamsup/projecteuler_python | problem_006.py | Python | gpl-3.0 | 1,747 |
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from ..models import MyUser, Profile
from ..utils import perform_reputation_check
class CreateUserSerializer(serializers.ModelSerializer):
    """Sign-up serializer: email, names and a write-only password."""

    password = serializers.CharField(
        style={'input_type': 'password'}
    )

    class Meta:
        model = MyUser
        fields = ('email', 'password', 'first_name', 'last_name')
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        """Create and return a new user via the custom manager."""
        data = validated_data
        return MyUser.objects.create_user(
            email=data['email'],
            first_name=data['first_name'],
            last_name=data['last_name'],
            password=data['password'],
        )
class UserSerializer(serializers.ModelSerializer):
    """Read/update serializer for MyUser; id and email are read-only."""

    class Meta:
        model = MyUser
        fields = [
            'id',
            'email',
            'first_name',
            'last_name',
        ]
        extra_kwargs = {'id': {'read_only': True}, 'email': {'read_only': True}}

    def create(self, validated_data):
        # NOTE(review): 'password' is not in Meta.fields, so validated_data
        # will not contain it when this serializer validates input -- this
        # create() would raise KeyError if ever exercised; confirm whether
        # it is dead code duplicated from CreateUserSerializer.
        user = MyUser.objects.create_user(
            email=validated_data['email'],
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            password=validated_data['password']
        )
        return user

    def update(self, instance, validated_data):
        # Only first/last name are updatable; id and email are read-only.
        instance.first_name = validated_data.get('first_name', instance.first_name)
        instance.last_name = validated_data.get('last_name', instance.last_name)
        instance.save()
        return instance
class FollowSerializer(serializers.ModelSerializer):
    """Compact representation of a followed profile: id, name and link."""

    url = serializers.HyperlinkedIdentityField(view_name='profiles:profile-detail')
    full_name = serializers.SerializerMethodField()

    class Meta:
        model = Profile
        fields = ['user_id', 'full_name', 'url']

    def get_full_name(self, obj):
        """Return the display name of the profile's user."""
        owner = obj.user
        return owner.get_full_name()
class CreateProfileSerializer(serializers.ModelSerializer):
    """Creates a nested user and returns the profile associated with it."""

    user = CreateUserSerializer()

    class Meta:
        model = Profile
        fields = [
            'user',
            'follows'
        ]

    def create(self, validated_data):
        """Create the nested user, then fetch and save its profile."""
        user_payload = validated_data.pop('user')
        created_user = CreateUserSerializer().create(user_payload)
        profile = Profile.objects.get(user_id=created_user.id)
        profile.save()
        return profile
class ProfileSerializer(serializers.ModelSerializer):
    """Read-only profile view with activity counters and follow links."""

    user = UserSerializer(read_only=True)
    reputation = serializers.CharField(max_length=8, read_only=True)
    follows = FollowSerializer(read_only=True, many=True)
    url = serializers.HyperlinkedIdentityField(view_name='profiles:profile-detail')
    questions_count = serializers.SerializerMethodField()
    answers_count = serializers.SerializerMethodField()
    followed_by = serializers.SerializerMethodField()

    class Meta:
        model = Profile
        fields = [
            'url',
            'user',
            'reputation',
            'follows',
            'questions_count',
            'answers_count',
            'followed_by'
        ]

    def get_questions_count(self, obj):
        """Number of questions authored by this profile's user."""
        owner = obj.user
        return owner.questions.count()

    def get_answers_count(self, obj):
        """Number of answers authored by this profile's user."""
        owner = obj.user
        return owner.answers.count()

    def get_followed_by(self, obj):
        """Number of profiles following this one (reverse relation)."""
        followers = obj.profile_set
        return followers.count()
class UpdateProfileSerializer(serializers.ModelSerializer):
    """Update serializer for Profile with a nested, updatable user."""

    user = UserSerializer()

    class Meta:
        model = Profile
        fields = [
            'user',
            'reputation',
            'follows',
        ]

    def validate_follows(self, value):
        """Reject a profile attempting to follow itself."""
        if self.instance in value:
            raise serializers.ValidationError(_('User cannot follow self'))
        return value

    def validate_reputation(self, value):
        """Only accept the reputation computed for this user."""
        if value != perform_reputation_check(self.instance.user):
            raise serializers.ValidationError(_('Selected reputation is not valid for this user'))
        return value

    def update(self, instance, validated_data):
        # Robustness fix: the original indexed validated_data['user'] and
        # validated_data['follows'] directly, raising KeyError on partial
        # updates that omit those keys; use defaults instead.
        user_data = validated_data.pop('user', None)
        if user_data:
            UserSerializer().update(instance.user, user_data)
        instance.reputation = validated_data.get('reputation', instance.reputation)
        follows = validated_data.get('follows')
        if follows:
            # add() appends to the M2M set; existing follows are kept.
            instance.follows.add(*follows)
        instance.save()
        return instance
class AuthorSerializer(serializers.ModelSerializer):
    """Lightweight author representation with a link to the profile."""

    url = serializers.HyperlinkedIdentityField(view_name='profiles:profile-detail')
    full_name = serializers.SerializerMethodField()

    class Meta:
        model = MyUser
        fields = ['id', 'email', 'url', 'full_name']

    def get_full_name(self, obj):
        """Return the user's combined first/last name."""
        return obj.get_full_name()
| TheRedLady/codebook | codebook/profiles/restapi/serializers.py | Python | gpl-3.0 | 4,811 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2021 Luca Falavigna
#
# Author: Luca Falavigna <dktrkranz@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Deb-o-Matic documentation build configuration file
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Deb-o-Matic'
copyright = '2007-2021, Luca Falavigna'
version = '0.25'
release = '0.25'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_use_index = True
htmlhelp_basename = 'Deb-o-Maticdoc'
latex_documents = [
('index', 'Deb-o-Matic.tex', 'Deb-o-Matic Documentation',
'Luca Falavigna', 'manual', 'True')]
latex_elements = {
'classoptions': ',oneside',
'babel': '\\usepackage[english]{babel}'}
man_pages = [
('index', 'deb-o-matic', 'Deb-o-Matic Documentation',
['Luca Falavigna'], 1)]
| debomatic/debomatic | docs/conf.py | Python | gpl-3.0 | 1,509 |
from __future__ import print_function
from __future__ import division
import os
import sys
import numpy as np
import pandas as pd
from ast import literal_eval
try: # run as a package if installed
from pcntoolkit.model.bayesreg import BLR
from pcntoolkit.normative_model.norm_base import NormBase
from pcntoolkit.dataio import fileio
from pcntoolkit.util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
except ImportError:
pass
path = os.path.abspath(os.path.dirname(__file__))
if path not in sys.path:
sys.path.append(path)
del path
from model.bayesreg import BLR
from norm_base import NormBase
from dataio import fileio
from util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
class NormBLR(NormBase):
    """ Normative modelling based on Bayesian Linear Regression

    Wraps a BLR model with a polynomial basis expansion, optional
    heteroskedastic noise (variance groups or variance covariates),
    optional ARD priors and an optional warped (non-Gaussian) likelihood.
    """

    def __init__(self, **kwargs):
        # Pull the core arguments out of kwargs; everything left over is
        # forwarded verbatim to the underlying BLR object.
        X = kwargs.pop('X', None)
        y = kwargs.pop('y', None)
        theta = kwargs.pop('theta', None)
        # theta may arrive serialized as a string (e.g. from the CLI).
        if isinstance(theta, str):
            theta = np.array(literal_eval(theta))
        self.optim_alg = kwargs.get('optimizer', 'powell')

        if X is None:
            # NOTE(review): `raise(ValueError, "...")` raises the *tuple*
            # under Python 3 (TypeError), not a ValueError -- verify intent.
            raise(ValueError, "Data matrix must be specified")

        if len(X.shape) == 1:
            self.D = 1
        else:
            self.D = X.shape[1]

        # Parse model order
        # NOTE(review): `kwargs is None` can never be true for a **kwargs
        # dict, so the first branch is dead code.
        if kwargs is None:
            model_order = 1
        elif 'configparam' in kwargs:  # deprecated syntax
            model_order = kwargs.pop('configparam')
        elif 'model_order' in kwargs:
            model_order = kwargs.pop('model_order')
        else:
            model_order = 1

        # Force a default model order and check datatype
        if model_order is None:
            model_order = 1
        if type(model_order) is not int:
            model_order = int(model_order)

        # configure heteroskedastic noise
        if 'varcovfile' in kwargs:
            # Noise variance modelled from explicit covariates (one noise
            # parameter per covariate column).
            var_cov_file = kwargs.get('varcovfile')
            if var_cov_file.endswith('.pkl'):
                self.var_covariates = pd.read_pickle(var_cov_file)
            else:
                self.var_covariates = np.loadtxt(var_cov_file)
            if len(self.var_covariates.shape) == 1:
                self.var_covariates = self.var_covariates[:, np.newaxis]
            n_beta = self.var_covariates.shape[1]
            self.var_groups = None
        elif 'vargroupfile' in kwargs:
            # configure variance groups (e.g. site specific variance);
            # one noise parameter per distinct group label.
            var_groups_file = kwargs.pop('vargroupfile')
            if var_groups_file.endswith('.pkl'):
                self.var_groups = pd.read_pickle(var_groups_file)
            else:
                self.var_groups = np.loadtxt(var_groups_file)
            var_ids = set(self.var_groups)
            var_ids = sorted(list(var_ids))
            n_beta = len(var_ids)
        else:
            # Homoskedastic noise: a single variance parameter.
            self.var_groups = None
            self.var_covariates = None
            n_beta = 1

        # are we using ARD (one prior precision per basis function)?
        if 'use_ard' in kwargs:
            self.use_ard = kwargs.pop('use_ard')
        else:
            self.use_ard = False
        if self.use_ard:
            n_alpha = self.D * model_order
        else:
            n_alpha = 1

        # Configure warped likelihood
        if 'warp' in kwargs:
            warp_str = kwargs.pop('warp')
            if warp_str is None:
                self.warp = None
                n_gamma = 0
            else:
                # set up warp: warp_str names a class (e.g. 'WarpBoxCox')
                # that must be importable in this module's namespace.
                exec('self.warp =' + warp_str + '()')
                n_gamma = self.warp.get_n_params()
        else:
            self.warp = None
            n_gamma = 0

        # Total hyperparameter count: prior precisions + noise + warp.
        self._n_params = n_alpha + n_beta + n_gamma
        self._model_order = model_order

        print("configuring BLR ( order", model_order, ")")
        if (theta is None) or (len(theta) != self._n_params):
            print("Using default hyperparameters")
            self.theta0 = np.zeros(self._n_params)
        else:
            self.theta0 = theta
        self.theta = self.theta0

        # initialise the BLR object if the required parameters are present
        if (theta is not None) and (y is not None):
            Phi = create_poly_basis(X, self._model_order)
            self.blr = BLR(theta=theta, X=Phi, y=y,
                           warp=self.warp, **kwargs)
        else:
            self.blr = BLR(**kwargs)

    @property
    def n_params(self):
        # Total number of hyperparameters (alpha + beta + warp gammas).
        return self._n_params

    @property
    def neg_log_lik(self):
        # Negative log marginal likelihood of the fitted BLR.
        return self.blr.nlZ

    def estimate(self, X, y, **kwargs):
        """Fit the BLR hyperparameters on (X, y) and return self."""
        theta = kwargs.pop('theta', None)
        if isinstance(theta, str):
            theta = np.array(literal_eval(theta))
        # remove warp string to prevent it being passed to the blr object
        kwargs.pop('warp', None)

        # Expand covariates into the polynomial basis.
        Phi = create_poly_basis(X, self._model_order)
        if len(y.shape) > 1:
            y = y.ravel()

        if theta is None:
            theta = self.theta0
        # (re-)initialize BLR object because parameters were not specified
        self.blr = BLR(theta=theta, X=Phi, y=y,
                       var_groups=self.var_groups,
                       warp=self.warp, **kwargs)

        self.theta = self.blr.estimate(theta, Phi, y,
                                       var_covariates=self.var_covariates, **kwargs)

        return self

    def predict(self, Xs, X=None, y=None, **kwargs):
        """Predict at test covariates Xs; returns (yhat, s2).

        Optional adaptation files allow re-centering predictions on a
        small adaptation sample (transfer to a new site).
        """
        theta = self.theta  # always use the estimated coefficients
        # remove from kwargs to avoid downstream problems
        kwargs.pop('theta', None)

        Phis = create_poly_basis(Xs, self._model_order)
        if X is None:
            Phi = None
        else:
            Phi = create_poly_basis(X, self._model_order)

        # process variance groups for the test data
        if 'testvargroupfile' in kwargs:
            var_groups_test_file = kwargs.pop('testvargroupfile')
            if var_groups_test_file.endswith('.pkl'):
                var_groups_te = pd.read_pickle(var_groups_test_file)
            else:
                var_groups_te = np.loadtxt(var_groups_test_file)
        else:
            var_groups_te = None

        # process test variance covariates
        if 'testvarcovfile' in kwargs:
            var_cov_test_file = kwargs.get('testvarcovfile')
            if var_cov_test_file.endswith('.pkl'):
                var_cov_te = pd.read_pickle(var_cov_test_file)
            else:
                var_cov_te = np.loadtxt(var_cov_test_file)
        else:
            var_cov_te = None

        # do we want to adjust the responses?
        if 'adaptrespfile' in kwargs:
            y_adapt = fileio.load(kwargs.pop('adaptrespfile'))
            if len(y_adapt.shape) == 1:
                y_adapt = y_adapt[:, np.newaxis]
        else:
            y_adapt = None

        if 'adaptcovfile' in kwargs:
            X_adapt = fileio.load(kwargs.pop('adaptcovfile'))
            Phi_adapt = create_poly_basis(X_adapt, self._model_order)
        else:
            Phi_adapt = None

        if 'adaptvargroupfile' in kwargs:
            var_groups_adapt_file = kwargs.pop('adaptvargroupfile')
            if var_groups_adapt_file.endswith('.pkl'):
                var_groups_ad = pd.read_pickle(var_groups_adapt_file)
            else:
                var_groups_ad = np.loadtxt(var_groups_adapt_file)
        else:
            var_groups_ad = None

        if y_adapt is None:
            # Plain prediction on the test covariates.
            yhat, s2 = self.blr.predict(theta, Phi, y, Phis,
                                        var_groups_test=var_groups_te,
                                        var_covariates_test=var_cov_te,
                                        **kwargs)
        else:
            # Predict and re-center using the adaptation sample.
            yhat, s2 = self.blr.predict_and_adjust(theta, Phi_adapt, y_adapt, Phis,
                                                   var_groups_test=var_groups_te,
                                                   var_groups_adapt=var_groups_ad,
                                                   **kwargs)

        return yhat, s2
| amarquand/nispat | pcntoolkit/normative_model/norm_blr.py | Python | gpl-3.0 | 8,464 |
# coding=utf-8
# Author: CristianBB
# Greetings to Mr. Pine-apple
# URL: https://sick-rage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
from requests.compat import urljoin
from sickbeard import helpers, logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class newpctProvider(TorrentProvider):
    """Torrent provider scraping newpct.com (Spanish-language releases)."""

    def __init__(self):

        TorrentProvider.__init__(self, 'Newpct')

        # Restrict searches to Spanish-audio releases when enabled.
        self.onlyspasearch = None

        self.url = 'http://www.newpct.com'
        self.urls = {'search': urljoin(self.url, 'index.php')}

        # Provider has no RSS feed; poll the site at most every 20 minutes.
        self.cache = tvcache.TVCache(self, min_time=20)

    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        """
        Search query:
        http://www.newpct.com/index.php?l=doSearch&q=fringe&category_=All&idioma_=1&bus_de_=All

        q => Show name
        category_ = Category 'Shows' (767)
        idioma_ = Language Spanish (1), All
        bus_de_ = Date from (All, mes, semana, ayer, hoy)
        """
        results = []

        # Only search if user conditions are true
        lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang

        search_params = {
            'l': 'doSearch',
            'q': '',
            'category_': 'All',
            'idioma_': 1,
            'bus_de_': 'All'
        }

        for mode in search_strings:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

            # Language filter: 1 = Spanish only, 'All' = any language.
            if self.onlyspasearch:
                search_params['idioma_'] = 1
            else:
                search_params['idioma_'] = 'All'

            # Only search if user conditions are true
            if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
                logger.log('Show info is not spanish, skipping provider search', logger.DEBUG)
                continue

            # RSS mode polls the last week only; manual searches use 'All'.
            search_params['bus_de_'] = 'All' if mode != 'RSS' else 'semana'

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log('Search string: {0}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                search_params['q'] = search_string

                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', id='categoryTable')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 3:  # Headers + 1 Torrent + Pagination
                        logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                        continue

                    # 'Fecha', 'Título', 'Tamaño', ''
                    # Date, Title, Size
                    labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]

                    # Skip the header row and the trailing pagination row.
                    for row in torrent_rows[1:-1]:
                        try:
                            cells = row('td')

                            torrent_row = row.find('a')
                            download_url = torrent_row.get('href', '')
                            title = self._processTitle(torrent_row.get('title', ''), download_url)
                            if not all([title, download_url]):
                                continue

                            # Provider does not provide seeders/leechers
                            seeders = 1
                            leechers = 0

                            #2 is the 'Tamaño' (size) column.
                            torrent_size = cells[2].get_text(strip=True)

                            size = convert_size(torrent_size) or -1
                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log('Found result: {0}'.format(title), logger.DEBUG)

                            items.append(item)
                        except (AttributeError, TypeError):
                            continue

            results += items

        return results

    def get_url(self, url, post_data=None, params=None, timeout=30, **kwargs):  # pylint: disable=too-many-arguments
        """
        returns='content' when trying access to torrent info (For calling torrent client). Previously we must parse
        the URL to get torrent file
        """
        trickery = kwargs.pop('returns', '')
        if trickery == 'content':
            # Fetch the show sheet as text, extract the real .torrent URL
            # from it, then fall through to fetch that URL as content.
            kwargs['returns'] = 'text'
            data = super(newpctProvider, self).get_url(url, post_data=post_data, params=params, timeout=timeout, **kwargs)
            url = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()
            url = urljoin(self.url, url.rsplit('=', 1)[-1])

        kwargs['returns'] = trickery
        return super(newpctProvider, self).get_url(url, post_data=post_data, params=params,
                                                   timeout=timeout, **kwargs)

    def download_result(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            # Search results don't return torrent files directly, it returns show sheets so we must parse showSheet to access torrent.
            data = self.get_url(url, returns='text')
            url_torrent = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()

            if url_torrent.startswith('http'):
                self.headers.update({'Referer': '/'.join(url_torrent.split('/')[:3]) + '/'})

            logger.log('Downloading a result from {0}'.format(url))

            if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log('Saved result to {0}'.format(filename), logger.INFO)
                    return True
                else:
                    logger.log('Could not download {0}'.format(url), logger.WARNING)
                    helpers.remove_file_failed(filename)

        if urls:
            logger.log('Failed to download any results', logger.WARNING)

        return False

    @staticmethod
    def _processTitle(title, url):
        """Normalise a scraped Spanish release title into scene naming.

        Strips the site's prefix, maps bracketed quality/language tags to
        standard tokens, and infers missing quality hints from the URL.
        """

        # Remove 'Mas informacion sobre ' literal from title (22 chars).
        title = title[22:]
        title = re.sub(r'[ ]{2,}', ' ', title, flags=re.I)

        # Quality - Use re module to avoid case sensitive problems with replace
        title = re.sub(r'\[HDTV 1080p?[^\[]*]', '1080p HDTV x264', title, flags=re.I)
        title = re.sub(r'\[HDTV 720p?[^\[]*]', '720p HDTV x264', title, flags=re.I)
        title = re.sub(r'\[ALTA DEFINICION 720p?[^\[]*]', '720p HDTV x264', title, flags=re.I)
        title = re.sub(r'\[HDTV]', 'HDTV x264', title, flags=re.I)
        title = re.sub(r'\[DVD[^\[]*]', 'DVDrip x264', title, flags=re.I)
        title = re.sub(r'\[BluRay 1080p?[^\[]*]', '1080p BluRay x264', title, flags=re.I)
        title = re.sub(r'\[BluRay Rip 1080p?[^\[]*]', '1080p BluRay x264', title, flags=re.I)
        title = re.sub(r'\[BluRay Rip 720p?[^\[]*]', '720p BluRay x264', title, flags=re.I)
        title = re.sub(r'\[BluRay MicroHD[^\[]*]', '1080p BluRay x264', title, flags=re.I)
        title = re.sub(r'\[MicroHD 1080p?[^\[]*]', '1080p BluRay x264', title, flags=re.I)
        title = re.sub(r'\[BLuRay[^\[]*]', '720p BluRay x264', title, flags=re.I)
        title = re.sub(r'\[BRrip[^\[]*]', '720p BluRay x264', title, flags=re.I)
        title = re.sub(r'\[BDrip[^\[]*]', '720p BluRay x264', title, flags=re.I)

        #detect hdtv/bluray by url
        #hdtv 1080p example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-610/hdtv-1080p-ac3-5-1/
        #hdtv 720p example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-26/hdtv-720p-ac3-5-1/
        #hdtv example url: http://www.newpct.com/descargar-serie/foo/capitulo-214/hdtv/
        #bluray compilation example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-11/bluray-1080p/
        title_hdtv = re.search(r'HDTV', title, flags=re.I)
        title_720p = re.search(r'720p', title, flags=re.I)
        title_1080p = re.search(r'1080p', title, flags=re.I)
        title_x264 = re.search(r'x264', title, flags=re.I)
        title_bluray = re.search(r'bluray', title, flags=re.I)
        title_serie_hd = re.search(r'descargar\-seriehd', title, flags=re.I)
        url_hdtv = re.search(r'HDTV', url, flags=re.I)
        url_720p = re.search(r'720p', url, flags=re.I)
        url_1080p = re.search(r'1080p', url, flags=re.I)
        url_bluray = re.search(r'bluray', url, flags=re.I)

        # Fill in quality tokens that appear in the URL but not the title.
        if not title_hdtv and url_hdtv:
            title += ' HDTV'
            if not title_x264:
                title += ' x264'
        if not title_bluray and url_bluray:
            title += ' BluRay'
            if not title_x264:
                title += ' x264'
        if not title_1080p and url_1080p:
            title += ' 1080p'
            title_1080p = True
        if not title_720p and url_720p:
            title += ' 720p'
            title_720p = True
        if not (title_720p or title_1080p) and title_serie_hd:
            title += ' 720p'

        # Language
        title = re.sub(r'\[Spanish[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)
        title = re.sub(r'\[Castellano[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)
        title = re.sub(r'\[Español[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)
        title = re.sub(r'\[AC3 5\.1 Español[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)

        # Release-group suffix: VO (original version) vs dubbed.
        if re.search(r'\[V.O.[^\[]*]', title, flags=re.I):
            title += '-NEWPCTVO'
        else:
            title += '-NEWPCT'

        return title.strip()
provider = newpctProvider()
| Arcanemagus/SickRage | sickbeard/providers/newpct.py | Python | gpl-3.0 | 10,924 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
################################################################################
# DChars Copyright (C) 2012 Suizokukan
# Contact: suizokukan _A.T._ orange dot fr
#
# This file is part of DChars.
# DChars is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DChars is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DChars. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
❏DChars❏ : dchars/languages/lat/symbols.py
"""
# problem with Pylint :
# pylint: disable=E0611
# many errors like "No name 'extensions' in module 'dchars'"
from dchars.utilities.name2symbols import Name2Symbols
#...............................................................................
# symbols used by Latin
#
# CAVEAT ! If you modify these dictionaries, don't forget to modify the
# corresponding transliteration's dictionaries !
#
#...............................................................................
# Upper-case letters: each letter name ('a'..'z') maps to a 1-tuple
# containing its upper-case symbol, e.g. 'a' -> ('A',).
SYMB_UPPER_CASE = Name2Symbols(
    { letter : (letter.upper(),) for letter in "abcdefghijklmnopqrstuvwxyz" })
# Lower-case letters: each letter name maps to a 1-tuple containing the
# letter itself, e.g. 'a' -> ('a',).
SYMB_LOWER_CASE = Name2Symbols(
    { letter : (letter,) for letter in "abcdefghijklmnopqrstuvwxyz" })
# Punctuation, digits and whitespace recognised by the Latin module.
# Each name maps to the tuple of accepted source symbols; the first entry
# is the canonical one.
SYMB_PUNCTUATION = Name2Symbols(
    {'-' : ("-", "—"),
     ')' : (')',),
     '(' : ('(',),
     '[' : ('[',),
     ']' : (']',),
     '{' : ('{',),
     '}' : ('}',),
     '0' : ('0',),
     '1' : ('1',),
     '2' : ('2',),
     '3' : ('3',),
     '4' : ('4',),
     '5' : ('5',),
     '6' : ('6',),
     '7' : ('7',),
     '8' : ('8',),
     '9' : ('9',),
     ' ' : (' ',),
     '.' : ('.',),
     ',' : (',',),
     ';' : (';',),
     '!' : ('!',),
     '?' : ('?',),
     '"' : ('"','‘',"’",),
     "'" : ("'","᾽"),
     # NOTE(review): (":") is a plain string, not a 1-tuple like every
     # other entry -- confirm Name2Symbols accepts both forms.
     ":" : (":"),
     '\n' : ('\n',),
     '\r' : ('\r',),
     '\t' : ('\t',),
    })
# Combining diacritical marks (Unicode combining code points); the first
# symbol in each tuple is the default returned by get_default_symbol().
SYMB_DIACRITICS = Name2Symbols(
    {
      "stress"   : ( chr(0x301), chr(0x030D) ),  # á, a̍
      "long"     : ( chr(0x304),),               # ā
      "short"    : ( chr(0x306),),               # ă
      "diaeresis": ( chr(0x308),),               # ä
    })

# we define these constants in order to avoid multiple calls to SYMB_DIACRITICS.get_default_symbol :
DEFAULTSYMB__STRESS = SYMB_DIACRITICS.get_default_symbol("stress")
DEFAULTSYMB__DIAERESIS = SYMB_DIACRITICS.get_default_symbol("diaeresis")

#...............................................................................
# we calculate these tuple which is often used in order to speed up the code :
#...............................................................................
SYMB_DIACRITICS__STRESS = SYMB_DIACRITICS["stress"]
SYMB_DIACRITICS__LENGTH = ( SYMB_DIACRITICS["short"] + \
                            SYMB_DIACRITICS["long"] )
SYMB_DIACRITICS__DIAERESIS = SYMB_DIACRITICS["diaeresis"]
| suizokukan/anceps | dchars/languages/lat/symbols.py | Python | gpl-3.0 | 4,869 |
from datetime import date
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.conf import settings
from django.utils.translation import ugettext as _
from django.utils.dateformat import format as format_date
from django.shortcuts import get_object_or_404
from django.http import Http404
from haystack.generic_views import SearchView
from .models import Post, Category
class _PostsListView(ListView):
    """
    Base class for displaying post lists.

    Centralises the skin-dependent template, the context variable name and
    the pagination size shared by all post-list views of the blog.
    """
    # CURRENT_SKIN selects the active theme's template directory.
    template_name = '{0}/blog_posts_list.html'.format(settings.CURRENT_SKIN)
    context_object_name = 'posts'
    paginate_by = settings.BLOG_POSTS_PAGINATE_BY

    def get_queryset(self):
        # Prefetch categories so rendering each post's category list does not
        # issue one extra query per post.
        return super().get_queryset().prefetch_related('categories')
class _PageTitleMixIn:
    """
    Adds ``page_title`` to a ListView's template context.

    Subclasses either set the ``page_title`` class attribute or assign
    ``self.page_title`` before the context is built (e.g. in get_queryset).
    """
    page_title = None

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['page_title'] = self.page_title
        return context
class BlogHomeView(_PostsListView):
    """
    Displays the list of all published posts starting from the recent.

    Template: ``blog_posts_list.html``

    Specific context variable: ``posts``
    """
    # Only published posts; ordering comes from the Post model/manager.
    queryset = Post.objects.published()
class BlogFeaturedPostsView(_PageTitleMixIn, _PostsListView):
    """
    Displays the list of featured posts, as selected by
    ``Post.objects.featured()``.

    Template: ``blog_posts_list.html``

    Specific context variables:

    - ``posts``
    - ``page_title``
    """
    queryset = Post.objects.featured()
    page_title = _('Featured Posts')
class BlogCategoryView(_PageTitleMixIn, _PostsListView):
    """
    Displays the list of posts in a given category.

    Template: ``blog_posts_list.html``

    Specific context variables:

    - ``posts``
    - ``page_title``

    Raises Http404 when the category slug does not exist.
    """
    def get_queryset(self):
        category = get_object_or_404(Category, slug=self.kwargs['slug'])
        # Bugfix: translate first, then interpolate.  The original called
        # .format() on the msgid itself, so the already-formatted string never
        # matched any entry in the translation catalog (compare
        # BlogMonthArchiveView, which does it correctly).
        self.page_title = _('Posts in "{0}" category').format(category.name)
        return Post.objects.published().filter(categories__pk=category.pk)
class BlogCategoriesListView(_PageTitleMixIn, ListView):
    """
    Displays the list of categories that have posts in them
    (empty categories are filtered out by ``Category.objects.non_empty()``).

    Template: ``blog_categories_list.html``

    Specific context variables:

    - ``categories``
    - ``page_title``
    """
    template_name = '{0}/blog_categories_list.html'.format(settings.CURRENT_SKIN)
    queryset = Category.objects.non_empty()
    page_title = _('Categories')
    context_object_name = 'categories'
class BlogPostView(DetailView):
    """
    Displays a blog post page.

    Template: ``blog_post.html``

    Specific context variable: ``post``

    Unpublished posts are only visible to authenticated users; anonymous
    requests for them get a 404.
    """
    template_name = '{0}/blog_post.html'.format(settings.CURRENT_SKIN)
    model = Post
    context_object_name = 'post'
    # Look posts up by both pk and slug so outdated slugs still resolve.
    query_pk_and_slug = True

    # NOTE: the former dispatch() override that stored ``self.request`` was
    # removed as dead code -- Django's View machinery (as_view()/setup())
    # already assigns self.request before dispatch() runs.

    def get_object(self, queryset=None):
        """
        Prevent non-authenticated users from viewing unpublished posts.
        """
        post = super().get_object(queryset)
        if not (post.is_published or self.request.user.is_authenticated):
            raise Http404
        return post
class BlogArchiveView(_PageTitleMixIn, ListView):
    """
    Displays the blog archive by years and months.

    Template: ``blog_archive.html``

    Specific context variables:

    - ``months`` -- the list of :class:`datetime.date` objects representing
      months that contain at least one published post, newest first
    - ``page_title``
    """
    template_name = '{0}/blog_archive.html'.format(settings.CURRENT_SKIN)
    # QuerySet.dates() truncates publication dates to month granularity.
    queryset = Post.objects.published().dates('date_published', 'month', order='DESC')
    context_object_name = 'months'
    page_title = _('Blog Archive')
class BlogMonthArchiveView(_PageTitleMixIn, _PostsListView):
    """
    Displays the list of posts by year and month.

    Expects integer ``year`` and ``month`` URL kwargs.

    Template: ``blog_posts_list.html``

    Specific context variables:

    - ``posts``
    - ``page_title``
    """
    def get_queryset(self):
        year = int(self.kwargs['year'])
        month = int(self.kwargs['month'])
        # Human-readable month name via Django's date formatting ("F Y").
        self.page_title = _('Blog Archive, {0}').format(format_date(date(year=year, month=month, day=1), 'F Y'))
        return Post.objects.published().filter(date_published__year=year, date_published__month=month)
class BlogPostSearchView(SearchView):
    """
    Displays the search page (backed by Haystack).

    Template: ``blog_search.html``

    Specific context variables: none.
    """
    template_name = '{0}/blog_search.html'.format(settings.CURRENT_SKIN)
    # NOTE(review): hard-coded page size, unlike the other list views which
    # use settings.BLOG_POSTS_PAGINATE_BY -- confirm whether intentional.
    paginate_by = 10

    def get_queryset(self):
        # Ask the search backend to highlight query terms in results.
        return super().get_queryset().highlight()
| romanvm/romans_blog | blog/views.py | Python | gpl-3.0 | 4,713 |
# Copyright 2013 Daniel Richman
#
# This file is part of The Snowball Ticketing System.
#
# The Snowball Ticketing System is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# The Snowball Ticketing System is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with The Snowball Ticketing System. If not, see
# <http://www.gnu.org/licenses/>.
"""
Information
This package contains utility functions that help produce, and a Flask
blueprint containing, the static views (homepage, info about the ball, ...).
Further, the :func:`prerender` function may be used to generate the HTML that
would have been produced by those views in advance, so it may be served
directly without going via Python
"""
from __future__ import unicode_literals
import os
import os.path
import re
import functools
import yaml
import shutil
import flask
import flask.json
import jinja2
from flask import render_template, request
from . import utils
__all__ = ["bp", "prerender"]
# Template pages look like "some_page.html"; data files like "misc.yaml".
page_filename_re = re.compile(r'^([a-zA-Z_]+)\.html$')
data_filename_re = re.compile("^([a-z]+)\.yaml$")

logger = utils.getLogger(__name__)

#: the :class:`flask.Blueprint` containing info views
bp = flask.Blueprint('info', __name__)

# Directory layout, resolved relative to this package (one level up).
root_dir = os.path.join(os.path.dirname(__file__), '..')
data_dir = os.path.join(root_dir, 'data')
templates_dir = os.path.join(root_dir, 'templates')
pages_dir = os.path.join(templates_dir, 'theme', 'pages')
prerendered_dir = os.path.join(root_dir, 'prerendered')
def load_data(data_dir):
    """
    Load every ``<key>.yaml`` file in *data_dir* into a dict keyed by *key*.

    Robustness fix: files whose names do not match :data:`data_filename_re`
    (``.gitignore``, editor backups, ...) are now skipped; previously the
    failed match returned None and the code crashed with an AttributeError.
    """
    data = {}
    for filename in os.listdir(data_dir):
        match = data_filename_re.match(filename)
        if match is None:
            # Not a data file -- ignore it.
            continue
        key, = match.groups()
        with open(os.path.join(data_dir, filename)) as f:
            data[key] = yaml.safe_load(f)
    return data
def find_pages(pages_dir):
    """Yield the endpoint name of every ``<name>.html`` template in *pages_dir*."""
    for entry in os.listdir(pages_dir):
        m = page_filename_re.match(entry)
        # Every file in the pages directory is expected to be a page template.
        assert m
        yield m.group(1)
def setup(bp, pages):
    """
    Register one URL rule per page endpoint on blueprint *bp*.

    'index' is mapped to "/"; every other endpoint to "/<name>" with
    underscores turned into dashes.  Each view renders the corresponding
    template from theme/pages with the module-level ``pages_data`` context.
    """
    for endpoint in pages:
        if endpoint == 'index':
            url = "/"
        else:
            url = "/" + endpoint.replace("_", "-")

        template = "theme/pages/{0}.html".format(endpoint)
        # pages_data is loaded once at import time (see module tail).
        view = functools.partial(render_template, template, **pages_data)
        bp.add_url_rule(url, endpoint, view)

    # Expose the page list to all templates (e.g. for navigation menus).
    bp.add_app_template_global(pages, 'info_pages')
def prerender_pages(app, pages, output_dir):
    """
    Render every info page to a static HTML file in *output_dir*.

    Existing files (except .gitignore) are deleted first.  Rendering runs
    inside a test request context so url_for() works without a live request.
    """
    # Note, this assumes that it's OK for url_for to produce
    # urls rooted at /
    # Also assumes the blueprint is attached at /
    # Don't use _external!

    for filename in os.listdir(output_dir):
        if filename != '.gitignore':
            logger.debug("Cleaning %s", filename)
            os.unlink(os.path.join(output_dir, filename))
        else:
            logger.debug("Keeping .gitignore")

    with app.test_request_context():
        for endpoint in pages:
            filename = os.path.join(output_dir, endpoint + ".html")
            template = "theme/pages/{0}.html".format(endpoint)
            logger.debug("Rendering endpoint %r -> %r", endpoint, filename)
            with open(filename, "w") as f:
                f.write(render_template(template, **pages_data))
# Discover the pages and their data once at import time, then wire up the
# blueprint's URL rules.
pages = list(find_pages(pages_dir))
pages_data = load_data(data_dir)
setup(bp, pages)

#: Given an app with `bp` attached, generate static info pages in prerendered/
prerender = functools.partial(prerender_pages, pages=pages,
                              output_dir=prerendered_dir)
| danielrichman/snowball-ticketing | snowball_ticketing/info.py | Python | gpl-3.0 | 3,845 |
from django.contrib import admin
from models import Article, RelatedLink, Category
# Register your models here.
class RelatedLinkInline(admin.TabularInline):
    """Tabular inline so related links can be edited on the Article page."""
    model = RelatedLink
    extra = 1  # show one empty extra row for adding a new link
class ArticleAdmin(admin.ModelAdmin):
    """Admin configuration for Article: slug auto-fill, search, filters
    and inline editing of related links."""
    # Auto-populate the slug from the title while typing.
    prepopulated_fields = {"slug": ("title",)}
    search_fields = ['title']
    list_display = ['title', 'slug', 'category']
    list_filter = ['created', 'modified']
    inlines = [
        RelatedLinkInline
    ]
# Expose the blog models in the Django admin site.
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category)
| unitycoders/uc-django-site | lectern/admin.py | Python | gpl-3.0 | 495 |
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
# ############################################################################
# ########## Libraries #############
# ##################################
# Standard library
import logging
from os import path
# 3rd party modules
import arrow
from isogeo_pysdk import Isogeo
# Django project
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from isogeo_notify.models import Metadata, Workgroup
# ############################################################################
# ########## Globals ##############
# #################################
# logger = logging.getLogger("ElPaso")
# ############################################################################
# ########### Classes #############
# #################################
class Command(BaseCommand):
    """Management command that syncs workgroups (and, once re-enabled,
    metadata) from the Isogeo API into the local database."""

    # NOTE(review): 'args' is the pre-Django-1.8 way of documenting
    # positional arguments; add_arguments() is the modern replacement.
    args = '<foo bar ...>'
    help = 'our help string comes here'

    def _update_db(self):
        """Update metadata list from API."""
        # get stored metadata
        db_mds = Metadata.objects.all()   # NOTE(review): currently unused
        db_wgs = Workgroup.objects.all()  # NOTE(review): currently unused
        # connect to isogeo
        isogeo = Isogeo(client_id=settings.ISOGEO_CLIENT_ID,
                        client_secret=settings.ISOGEO_CLIENT_SECRET,
                        lang="fr")
        token = isogeo.connect()
        search = isogeo.search(token,
                               # page_size=10,
                               order_by="modified",
                               # whole_share=0,
                               # sub_resources=["events"]
                               )
        # tags
        tags = search.get("tags")
        for tag in tags:
            if tag.startswith("owner"):
                # Tag keys look like "owner:<uuid>"; [6:-1] strips the prefix
                # and the last character -- TODO confirm the trailing char is
                # really extraneous for all tags.
                new_owner = Workgroup(isogeo_uuid=tag[6:-1],
                                      label=tags.get(tag))
                new_owner.save()
        # metadatas
        # for md in search.get("results"):
        #     try:
        #         new_md = Metadata(isogeo_id=md.get("_id"),
        #                           title=md.get("title", "No title"),
        #                           name=md.get("name"),
        #                           abstract=md.get("abstract"),
        #                           md_dt_crea=md.get("_created"),
        #                           md_dt_update=md.get("_modified"),
        #                           rs_dt_crea=md.get("created"),
        #                           rs_dt_update=md.get("modified"),
        #                           source=True)
        #         new_md.save()
        #         logging.info("Metadata added")
        #     except IntegrityError:
        #         # in case of duplicated offer
        #         logging.error("Metadata already existed")
        #         continue
        # NOTE(review): the "{}" placeholder below is never filled in, so the
        # message is logged literally -- TODO supply the actual count once the
        # metadata loop above is re-enabled.
        logging.info("{} metadata added")

    def handle(self, *args, **options):
        # Entry point invoked by `manage.py <command>`.
        self._update_db()
| Guts/isogeo-notifier | web/isogeo_notify/management/commands/api2db.py | Python | gpl-3.0 | 2,944 |
#!/usr/bin/env python
import sys
import json
import os
import datetime
import time
from pprint import pprint
# Default CentOS release used when a trigger does not request another one.
default_os = '8-stream'
##next_os = 'RHEL8.4'
#next_branch_base = 'rhel-8'
# Jenkins instance that runs the NetworkManager CI jobs.
jenkins_url = 'https://jenkins-networkmanager.apps.ocp.ci.centos.org/'
def __init__(self, data):
self.data = data
# If we don't have python-gitlab we can still use object for parsing
try:
import gitlab
self.gl_api = gitlab.Gitlab.from_config('gitlab.freedesktop.org')
group = 'NetworkManager'
self.gl_project = self.gl_api.projects.get('%s/%s' % (group, data['repository']['name']))
except:
pass
@property
def request_type(self):
return self.data['object_kind']
@property
def comment(self):
ret = None
if self.request_type == 'note':
ret = self.data['object_attributes']['note'].strip()
return ret
@property
def description(self):
ret = None
if self.request_type == 'note':
ret = self.data['merge_request']['description']
elif self.request_type == 'merge_request':
ret = self.data['object_attributes']['description']
return ret
@property
def title(self):
ret = None
if self.request_type == 'note':
ret = self.data['merge_request']['title']
elif self.request_type == 'merge_request':
ret = self.data['object_attributes']['title']
return ret
@property
def source_branch(self):
source_branch = None
if self.request_type == 'note':
source_branch = self.data['merge_request']['source_branch']
elif self.request_type == 'merge_request':
source_branch = self.data['object_attributes']['source_branch']
return source_branch
@property
def target_branch(self):
target_branch = None
if self.request_type == 'note':
target_branch = self.data['merge_request']['target_branch']
elif self.request_type == 'merge_request':
target_branch = self.data['object_attributes']['target_branch']
return target_branch
@property
def source_project_id(self):
target_branch = None
if self.request_type == 'note':
target_branch = self.data['merge_request']['source_project_id']
elif self.request_type == 'merge_request':
target_branch = self.data['object_attributes']['source_project_id']
return target_branch
@property
def target_project_id(self):
target_branch = None
if self.request_type == 'note':
target_branch = self.data['merge_request']['target_project_id']
elif self.request_type == 'merge_request':
target_branch = self.data['object_attributes']['target_project_id']
return target_branch
@property
def commit(self):
commit = None
if self.request_type == 'note':
commit = self.data['merge_request']['last_commit']['id'].strip()
elif self.request_type == 'merge_request':
commit = self.data['object_attributes']['last_commit']['id'].strip()
return commit
@property
def commit_author(self):
author = None
if self.request_type == 'note':
author = self.data['merge_request']['last_commit']['author']['email'].split('@')[0].strip()
elif self.request_type == 'merge_request':
author = self.data['object_attributes']['last_commit']['author']['email'].split('@')[0].strip()
return author
@property
def commit_message(self):
message = None
if self.request_type == 'note':
message = self.data['merge_request']['last_commit']['message']
elif self.request_type == 'merge_request':
message = self.data['object_attributes']['last_commit']['message']
return message.strip()
def post_commit_comment(self, text):
com = self.gl_project.commits.get(self.commit)
com.comments.create({'note': text})
def play_commit_job(self):
com = self.gl_project.commits.get(self.commit)
if com.last_pipeline is None:
return
pipeline = self.gl_project.pipelines.get(com.last_pipeline["id"])
jobs = pipeline.jobs.list()
for job in jobs:
if job.name == "TestResults":
job_trigger = self.gl_project.jobs.get(job.id)
job_trigger.play()
def mapper_text(self, refspec):
print(">> Reading mapper.yaml from gitlab ref: " + refspec)
f = self.gl_project.files.get(file_path='mapper.yaml', ref=refspec)
return f.decode()
@property
def merge_request_id(self):
mr_id = None
if self.request_type == 'note':
mr_id = self.data['merge_request']['iid']
elif self.request_type == 'merge_request':
mr_id = self.data['object_attributes']['iid']
return mr_id
@property
def merge_request_url(self):
mr_id = None
if self.request_type == 'note':
mr_id = self.data['merge_request']['url']
elif self.request_type == 'merge_request':
mr_id = self.data['object_attributes']['url']
return mr_id
@property
def repository(self):
return self.data['repository']['name']
def set_pipeline(self, status):
try:
description = ''
if status == 'pending':
description = 'The build has started'
if status == 'running':
description = 'The build is running'
elif status == 'canceled':
description == 'The build has been canceled'
elif status == 'success':
description == 'The build has finshed successfully'
elif status == 'failed':
description == 'The build has finshed unstable or failing'
com = self.gl_project.commits.get(self.commit)
if 'NetworkManager' in self.repository: # becuse of the new version prohibiting NAT target_url!
com.statuses.create({'state' : status,
'name': os.environ['BUILD_URL'],
'description' : description})
else:
com.statuses.create({'state' : status,
'target_url' : os.environ['BUILD_URL'],
'name': self.repository + ' test verification',
'description' : description})
except Exception as e:
print(str(e))
def get_rebuild_detail(gt):
    """Return the OS release requested via an '@OS:<release>' line, if any.

    Searches the merge-request description first, then the last commit
    message; returns None when no such line is present.

    Bugfix: the line is lower-cased before the prefix test, so the prefix
    must be compared lower-case too -- the original tested
    ``startswith('@OS:')`` on a lower-cased line, which could never match,
    so the function always returned None.
    """
    for message in (gt.description, gt.commit_message):
        for line in message.split('\n'):
            if line.strip().lower().startswith('@os:'):
                # Everything after the last ':' is the release name.
                return line.strip().split(':')[-1]
    return None
def get_mapper_yaml(repo_name):
    """Fetch mapper.yaml for *repo_name* from GitLab.

    NOTE(review): this function references ``settings``, ``gitlab_trigger``
    and ``get_gitlab_project``, none of which are defined in this file --
    presumably leftovers from another script; calling it here would raise
    NameError.  TODO: confirm and remove or repair.
    """
    refspec = settings['git_branch']
    if gitlab_trigger:
        # Prefer the commit of the triggering merge request when available.
        refspec = gitlab_trigger.commit
    print(">> Reading mapper.yaml from gitlab ref: " + refspec)
    f = get_gitlab_project(repo_name).files.get(file_path='mapper.yaml', ref=refspec)
    return f.decode()
# 'os_override' param for 'rebuild RHEL8.9' etc., good for nm less for desktop as it is mainly determined by branching
def execute_build(gt, content, os_override=None):
    """Trigger the matching Jenkins job for merge request *gt*.

    *content* is the raw webhook JSON, forwarded verbatim as the
    TRIGGER_DATA build parameter; *os_override* selects a non-default
    RELEASE.  Side effects: appends the commit SHA to /tmp/gl_commits and
    fires a curl POST via os.system().
    """
    component = gt.repository  # NOTE(review): unused local
    params = []
    os_version = default_os
    if os_override:
        os_version = os_override
    params.append({'name': 'RELEASE', 'value': os_version})
    # NM code MRs always test against NMCI master; NMCI MRs against NM main.
    if gt.repository == 'NetworkManager': # NM CODE will always use master NMCI
        params.append({'name': 'TEST_BRANCH', 'value': 'master'})
        params.append({'name': 'REFSPEC', 'value': gt.commit})
        project_dir = "NetworkManager-code-mr"
    if gt.repository == 'NetworkManager-ci': # NMCI always use main for code
        params.append({'name': 'TEST_BRANCH', 'value': gt.commit})
        params.append({'name': 'REFSPEC', 'value': 'main'})
        project_dir = "NetworkManager-test-mr"
    params.append({'name': 'VERSION', 'value': 'MR#%d %s: %s' % (gt.merge_request_id, gt.commit_author, gt.source_branch)})
    params.append({'name': 'FEATURES', 'value': 'best'})
    params.append({'name': 'RESERVE', 'value': '0s'})
    params.append({'name': 'TRIGGER_DATA', 'value': content})
    #params.append({'name': 'GL_TOKEN', 'value': os.environ['GL_TOKEN']})
    json_part = json.dumps({"parameter": params})
    # NOTE(review): the payload is embedded into a shell command line; single
    # quotes are stripped rather than escaped, so a hostile/odd payload could
    # still break or inject into the shell -- consider subprocess.run with an
    # argument list instead of os.system.
    url_part = "--data-urlencode json='%s'" % str(json_part.replace("'",""))
    job_url = '%s/job/%s' % (jenkins_url, project_dir)
    t = os.environ['JK_TOKEN']
    cmd = "curl -k -s -X POST %s/build --data 'token=%s' %s" % (job_url, t, url_part)
    # Remember that this commit has been built (checked by process_request).
    os.system("echo %s >> /tmp/gl_commits" % gt.commit)
    os.system(cmd)
    #print("curl $rc: %d" % )
    #print('Started new build in %s' % job_url)
def _dispatch_comment(gt, content):
    """Map an MR comment to a build action.

    Extracted helper: the original duplicated this 17-line if/elif chain
    verbatim in two branches of process_request().
    """
    comment = gt.comment.lower()
    if comment == 'rebuild':
        execute_build(gt, content)
    elif comment in ('rebuild centos8', 'rebuild c8'):
        # explicit CentOS 8 (non-stream) rebuild
        execute_build(gt, content, os_override='8')
    elif comment in ('rebuild centos8-stream', 'rebuild c8s'):
        # CentOS 8 Stream is the default release
        execute_build(gt, content)
    elif '@runtests:' in comment or '@build:' in comment:
        # '@build:' is an NM-specific tag to set UPSTREAM_REFSPEC_ID
        execute_build(gt, content)
    else:
        print('Irrelevant Note...')


def process_request(data, content):
    """Decide what to do with one webhook payload *data* (raw JSON *content*)."""
    gt = GitlabTrigger(data)
    if gt.source_project_id != gt.target_project_id:
        # MR from a fork: only explicit comment triggers are honoured.
        # NOTE(review): for non-note events gt.comment is None here, and the
        # dispatch raises AttributeError -- same behaviour as the original
        # code; confirm whether that is intended.
        _dispatch_comment(gt, content)
    elif gt.request_type == 'note':
        _dispatch_comment(gt, content)
    elif data['object_kind'] == 'merge_request':
        action = data['object_attributes']['action']
        if action == 'merge':
            print("MERGE packet, ignoring")
        elif action == 'close':
            print("CLOSE packet, ignoring")
        elif action in ['approved', 'unapproved']:
            print("APPROVED packet, ignoring")
        elif action == 'update':
            if gt.title.startswith("WIP"):
                print("This is WIP Merge Request - not proceeding")
            else:
                # Build each commit only once; 'rebuild' comments can force it.
                if not os.path.exists('/tmp/gl_commits'):
                    os.system("echo '' > /tmp/gl_commits")
                with open('/tmp/gl_commits') as f:
                    commits = f.read().splitlines()
                if gt.commit not in commits:
                    override = get_rebuild_detail(gt)
                    if override is not None:
                        override = override.upper()
                    execute_build(gt, content, os_override=override)
                else:
                    print("Commit %s have already executed, use rebuild if needed" % gt.commit)
        else:
            # Any other MR action (e.g. open/reopen): build unless WIP.
            if gt.title.startswith("WIP"):
                print("This is WIP Merge Request - not proceeding")
            else:
                execute_build(gt, content)
    else:
        print('Invalid object kind: %s' % data['object_kind'])
def run():
    """Entry point: read the webhook JSON file named in argv[1] and act on it."""
    if len(sys.argv) < 2:
        print("Invalid input")
        sys.exit(1)
    json_file = sys.argv[1]
    with open(json_file) as f:
        content = f.read()
    content = """%s"""%content
    # Visual separator plus timestamp in the (append-only) trigger log.
    print('\n\n\n\n\n-------------')
    print(datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S'))
    data = json.loads(content)
    #pprint(data)
    process_request(data, content)
    print('----end-------')
    #pprint(content)


if __name__ == '__main__':
    run()
| NetworkManager/NetworkManager-ci | run/centos-ci/cico_gitlab_trigger.py | Python | gpl-3.0 | 12,762 |
import cPickle as pickle
import numpy as np
import os
from scipy.misc import imread
def load_CIFAR_batch(filename):
    """Load a single CIFAR-10 batch file (Python 2 pickle format).

    Returns:
    - X: (10000, 32, 32, 3) float array of images
    - Y: (10000,) int array of labels
    """
    with open(filename, 'rb') as f:
        datadict = pickle.load(f)
        X = datadict['data']
        Y = datadict['labels']
        # Raw rows are 3072 bytes (3 channels x 32x32, channel-major):
        # reshape to NCHW, then transpose to NHWC.
        X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
        Y = np.array(Y)
        return X, Y
def load_CIFAR10(ROOT):
    """Load all of CIFAR-10: the five training batches plus the test batch.

    Returns (Xtr, Ytr, Xte, Yte) with 50000 training and 10000 test images.
    """
    xs = []
    ys = []
    for b in range(1, 6):
        f = os.path.join(ROOT, 'data_batch_%d' % (b, ))
        X, Y = load_CIFAR_batch(f)
        xs.append(X)
        ys.append(Y)
    Xtr = np.concatenate(xs)
    Ytr = np.concatenate(ys)
    # Free the last per-batch arrays before loading the test set.
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for classifiers. These are the same steps as we used for the SVM, but
    condensed to a single function.

    Returns a dict with keys X_train/y_train, X_val/y_val, X_test/y_test;
    images are mean-subtracted and in NCHW layout.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data: validation comes from the tail of the training set.
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Normalize the data: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    # Persist the mean image so inference code can apply the same shift
    # (protocol 1 pickle, file "mean_img" in the working directory).
    pickle.dump(mean_image, open("mean_img", "wb"), 1)

    # Transpose so that channels come first (NHWC -> NCHW)
    X_train = X_train.transpose(0, 3, 1, 2).copy()
    X_val = X_val.transpose(0, 3, 1, 2).copy()
    X_test = X_test.transpose(0, 3, 1, 2).copy()

    # Package data into a dictionary
    return {
      'X_train': X_train, 'y_train': y_train,
      'X_val': X_val, 'y_val': y_val,
      'X_test': X_test, 'y_test': y_test,
    }
def load_tiny_imagenet(path, dtype=np.float32):
    """
    Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
    TinyImageNet-200 have the same directory structure, so this can be used
    to load any of them.

    (Note: this file is Python 2 -- print statements and dict.iteritems.)

    Inputs:
    - path: String giving path to the directory to load.
    - dtype: numpy datatype used to load the data.

    Returns: A tuple of
    - class_names: A list where class_names[i] is a list of strings giving the
      WordNet names for class i in the loaded dataset.
    - X_train: (N_tr, 3, 64, 64) array of training images
    - y_train: (N_tr,) array of training labels
    - X_val: (N_val, 3, 64, 64) array of validation images
    - y_val: (N_val,) array of validation labels
    - X_test: (N_test, 3, 64, 64) array of testing images.
    - y_test: (N_test,) array of test labels; if test labels are not available
      (such as in student code) then y_test will be None.
    """
    # First load wnids
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]

    # Map wnids to integer labels
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}

    # Use words.txt to get names for each class
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        for wnid, words in wnid_to_words.iteritems():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]

    # Next load training data.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            print 'loading training data for synset %d / %d' % (i + 1, len(wnids))
        # To figure out the filenames we need to open the boxes file
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)

        X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                ## grayscale file: promote to a single-channel image
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(2, 0, 1)
        X_train.append(X_train_block)
        y_train.append(y_train_block)

    # We need to concatenate all training data
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)

    # Next load validation data; labels come from val_annotations.txt.
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
        num_val = len(img_files)
        y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
        X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
        for i, img_file in enumerate(img_files):
            img_file = os.path.join(path, 'val', 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                img.shape = (64, 64, 1)
            X_val[i] = img.transpose(2, 0, 1)

    # Next load test images
    # Students won't have test labels, so we need to iterate over files in the
    # images directory.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(2, 0, 1)

    # Test labels are only available in instructor copies.
    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]] for img_file in img_files]
        y_test = np.array(y_test)

    return class_names, X_train, y_train, X_val, y_val, X_test, y_test
def load_models(models_dir):
    """
    Load saved models from disk. This will attempt to unpickle all files in a
    directory; any files that give errors on unpickling (such as README.txt) will
    be skipped.

    Inputs:
    - models_dir: String giving the path to a directory containing model files.
      Each model file is a pickled dictionary with a 'model' field.

    Returns:
    A dictionary mapping model file names to models.
    """
    models = {}
    for model_file in os.listdir(models_dir):
        with open(os.path.join(models_dir, model_file), 'rb') as f:
            try:
                models[model_file] = pickle.load(f)['model']
            except pickle.UnpicklingError:
                # Not a pickle (e.g. README) -- skip silently.
                continue
    return models
| HaoMood/cs231n | assignment2/assignment2/cs231n/data_utils.py | Python | gpl-3.0 | 6,912 |
from rest_framework import viewsets, permissions
import models
import serializers
class PageViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.

    Access is restricted to admin users.
    """
    queryset = models.Page.objects.all()
    serializer_class = serializers.PageSerializer
    permission_classes = (permissions.IsAdminUser, )

    def pre_save(self, obj):
        # Record the requesting user as the page owner before saving.
        # NOTE(review): pre_save() is a DRF 2.x hook; DRF 3.x uses
        # perform_create()/perform_update() -- confirm the installed version.
        obj.owner = self.request.user
class PostViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.

    Access is restricted to admin users.
    """
    queryset = models.Post.objects.all()
    serializer_class = serializers.PostSerializer
    permission_classes = (permissions.IsAdminUser, )

    def pre_save(self, obj):
        # Record the requesting user as the post owner before saving.
        # NOTE(review): pre_save() is a DRF 2.x hook; DRF 3.x uses
        # perform_create()/perform_update() -- confirm the installed version.
        obj.owner = self.request.user
| esitamon/django-skeleton | app/website/api.py | Python | gpl-3.0 | 840 |
import pytest
np = pytest.importorskip("numpy")
npt = pytest.importorskip("numpy.testing")
import networkx as nx
from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
class TestDegreeMixingDict(BaseTestDegreeMixing):
    """Tests for nx.degree_mixing_dict on the fixture graphs (P4, D, M)
    provided by the base class."""

    def test_degree_mixing_dict_undirected(self):
        d = nx.degree_mixing_dict(self.P4)
        d_result = {
            1: {2: 2},
            2: {1: 2, 2: 2},
        }
        assert d == d_result

    def test_degree_mixing_dict_undirected_normalized(self):
        d = nx.degree_mixing_dict(self.P4, normalized=True)
        d_result = {
            1: {2: 1.0 / 3},
            2: {1: 1.0 / 3, 2: 1.0 / 3},
        }
        assert d == d_result

    def test_degree_mixing_dict_directed(self):
        d = nx.degree_mixing_dict(self.D)
        # (leftover debug print(d) removed -- tests should not write to stdout)
        d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}}
        assert d == d_result

    def test_degree_mixing_dict_multigraph(self):
        d = nx.degree_mixing_dict(self.M)
        d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}}
        assert d == d_result
class TestDegreeMixingMatrix(BaseTestDegreeMixing):
    """Tests for nx.degree_mixing_matrix: each case checks both the raw
    (normalized=False) count matrix and its normalized joint distribution."""

    def test_degree_mixing_matrix_undirected(self):
        # fmt: off
        a_result = np.array([[0, 0, 0],
                             [0, 0, 2],
                             [0, 2, 2]]
                            )
        # fmt: on
        a = nx.degree_mixing_matrix(self.P4, normalized=False)
        npt.assert_equal(a, a_result)
        a = nx.degree_mixing_matrix(self.P4)
        npt.assert_equal(a, a_result / float(a_result.sum()))

    def test_degree_mixing_matrix_directed(self):
        # fmt: off
        a_result = np.array([[0, 0, 0, 0],
                             [0, 0, 0, 2],
                             [0, 1, 0, 1],
                             [0, 0, 0, 0]]
                            )
        # fmt: on
        a = nx.degree_mixing_matrix(self.D, normalized=False)
        npt.assert_equal(a, a_result)
        a = nx.degree_mixing_matrix(self.D)
        npt.assert_equal(a, a_result / float(a_result.sum()))

    def test_degree_mixing_matrix_multigraph(self):
        # fmt: off
        a_result = np.array([[0, 0, 0, 0],
                             [0, 0, 1, 0],
                             [0, 1, 0, 3],
                             [0, 0, 3, 0]]
                            )
        # fmt: on
        a = nx.degree_mixing_matrix(self.M, normalized=False)
        npt.assert_equal(a, a_result)
        a = nx.degree_mixing_matrix(self.M)
        npt.assert_equal(a, a_result / float(a_result.sum()))

    def test_degree_mixing_matrix_selfloop(self):
        # fmt: off
        a_result = np.array([[0, 0, 0],
                             [0, 0, 0],
                             [0, 0, 2]]
                            )
        # fmt: on
        a = nx.degree_mixing_matrix(self.S, normalized=False)
        npt.assert_equal(a, a_result)
        a = nx.degree_mixing_matrix(self.S)
        npt.assert_equal(a, a_result / float(a_result.sum()))
class TestAttributeMixingDict(BaseTestAttributeMixing):
    """Tests for nx.attribute_mixing_dict using the 'fish' node attribute
    on the fixture graphs (G, D, M) provided by the base class."""

    def test_attribute_mixing_dict_undirected(self):
        d = nx.attribute_mixing_dict(self.G, "fish")
        d_result = {
            "one": {"one": 2, "red": 1},
            "two": {"two": 2, "blue": 1},
            "red": {"one": 1},
            "blue": {"two": 1},
        }
        assert d == d_result

    def test_attribute_mixing_dict_directed(self):
        # Directed: counts only appear for the source attribute value.
        d = nx.attribute_mixing_dict(self.D, "fish")
        d_result = {
            "one": {"one": 1, "red": 1},
            "two": {"two": 1, "blue": 1},
            "red": {},
            "blue": {},
        }
        assert d == d_result

    def test_attribute_mixing_dict_multigraph(self):
        # Multigraph: parallel edges are counted individually.
        d = nx.attribute_mixing_dict(self.M, "fish")
        d_result = {
            "one": {"one": 4},
            "two": {"two": 2},
        }
        assert d == d_result
class TestAttributeMixingMatrix(BaseTestAttributeMixing):
    """Tests for nx.attribute_mixing_matrix: a fixed attribute-to-index
    mapping is supplied and both raw counts and the normalized joint
    distribution are checked."""

    def test_attribute_mixing_matrix_undirected(self):
        mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
        a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]])
        a = nx.attribute_mixing_matrix(
            self.G, "fish", mapping=mapping, normalized=False
        )
        npt.assert_equal(a, a_result)
        a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping)
        npt.assert_equal(a, a_result / float(a_result.sum()))

    def test_attribute_mixing_matrix_directed(self):
        mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
        a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]])
        a = nx.attribute_mixing_matrix(
            self.D, "fish", mapping=mapping, normalized=False
        )
        npt.assert_equal(a, a_result)
        a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping)
        npt.assert_equal(a, a_result / float(a_result.sum()))

    def test_attribute_mixing_matrix_multigraph(self):
        mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
        a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
        a = nx.attribute_mixing_matrix(
            self.M, "fish", mapping=mapping, normalized=False
        )
        npt.assert_equal(a, a_result)
        a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping)
        npt.assert_equal(a, a_result / float(a_result.sum()))
| SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/assortativity/tests/test_mixing.py | Python | gpl-3.0 | 5,415 |
#!/bin/env python2.7
# -*- coding: utf-8 -*-
# This file is part of AT-Platform.
#
# AT-Platform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AT-Platform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AT-Platform. If not, see <http://www.gnu.org/licenses/>.
import wxversion
wxversion.select( '2.8' )
import glob, os, time
import wx
import wx.lib.buttons as bt
from pymouse import PyMouse
from pygame import mixer
#=============================================================================
class GenSymbolTextButton( bt.GenBitmapTextButton ): #Derive a class from GenBitmapTextButton and override _GetLabelSize and DrawLabel
    """Bitmap button whose text label is drawn below the bitmap — the
    layout conventionally used for AAC (Bliss) symbols."""
    #-------------------------------------------------------------------------
    def _GetLabelSize(self):
        """Used internally by wx: return (width, height, useBitmap) for the
        bitmap stacked vertically above the text label."""
        w, h = self.GetTextExtent( self.GetLabel( ) )
        if not self.bmpLabel:
            return w, h, True       # if there isn't a bitmap use the size of the text
        w_bmp = self.bmpLabel.GetWidth( ) + 2
        h_bmp = self.bmpLabel.GetHeight( ) + 2
        # Stacked layout: total height is text plus bitmap; width is the
        # wider of the two.
        height = h + h_bmp
        if w_bmp > w:
            width = w_bmp
        else:
            width = w
        return width, height, True
    #-------------------------------------------------------------------------
    def DrawLabel(self, dc, width, height, dx = 0, dy = 0):
        """Paint the bitmap above the centred text label, honouring the
        disabled/focused/selected bitmap variants and the pressed state."""
        bmp = self.bmpLabel
        if bmp is not None:     # if the bitmap is used
            # Pick the bitmap variant matching the button's current state.
            if self.bmpDisabled and not self.IsEnabled( ):
                bmp = self.bmpDisabled
            if self.bmpFocus and self.hasFocus:
                bmp = self.bmpFocus
            if self.bmpSelected and not self.up:
                bmp = self.bmpSelected
            bw,bh = bmp.GetWidth( ), bmp.GetHeight( ) ## size of the bitmap
            if not self.up:
                dx = dy = self.labelDelta
            hasMask = bmp.GetMask( ) is not None
        else:
            bw = bh = 0 # no bitmap -> size is zero
        if self.IsEnabled( ):
            dc.SetTextForeground( self.GetForegroundColour( ) )
        else:
            dc.SetTextForeground( wx.SystemSettings.GetColour( wx.SYS_COLOUR_GRAYTEXT ) )
        label = self.GetLabel( )
        tw, th = dc.GetTextExtent( label ) ## size of the text
        if not self.up:
            dx = dy = 4
        # NOTE: the coordinate maths below relies on Python 2 integer
        # division; offsets push the bitmap up and the text down.
        if bmp is not None:
            dc.DrawBitmap( bmp, ( width - bw ) / 2, ( height - 4*bh / 3 ) / 2, hasMask ) # draw bitmap if available (-bh)
        dc.DrawText( label, ( width - tw ) / 2, ( height + 2*bh / 3 ) / 2 ) # draw the text (+bh/2)
#=============================================================================
class bliss_symbols( wx.Frame ):
    """Full-screen, single-switch scanning board that displays pages of
    Bliss/AAC symbols and lets the user pick them panel -> row -> column."""

    def __init__(self, parent, id):
        # Use the full display, reserving a small margin at the bottom.
        self.winWidth, self.winHeight = wx.DisplaySize( )
        self.winHeight -= 20
        wx.Frame.__init__( self , parent , id, 'ATSymbols' )
        # Keep the board above every other window.
        style = self.GetWindowStyle( )
        self.SetWindowStyle( style | wx.STAY_ON_TOP )
        self.parent = parent
        self.Maximize( True )
        self.Centre( True )
        self.MakeModal( True )
        self.initializeParameters( )
        self.initializeBitmaps( )
        self.createGui( )
        self.initializeTimer( )
        self.createBindings( )
#-------------------------------------------------------------------------
    def initializeParameters(self):
        """Load user-configurable settings from the AT-Platform config
        files and initialise the scanning state machine, mouse and sounds."""
        # The platform root path is stored in a hidden file next to the app.
        with open( './.pathToATPlatform' ,'r' ) as textFile:
            self.pathToATPlatform = textFile.readline( )
        # Parse 'key = value' lines from the shared parameters file.
        with open( self.pathToATPlatform + 'parameters', 'r' ) as parametersFile:
            for line in parametersFile:
                if line[ :line.find('=')-1 ] == 'timeGap':
                    self.timeGap = int( line[ line.rfind('=')+2:-1 ] )
                elif line[ :line.find('=')-1 ] == 'backgroundColour':
                    self.backgroundColour = line[ line.rfind('=')+2:-1 ]
                elif line[ :line.find('=')-1 ] == 'textColour':
                    self.textColour = line[ line.rfind('=')+2:-1 ]
                elif line[ :line.find('=')-1 ] == 'scanningColour':
                    self.scanningColour = line[ line.rfind('=')+2:-1 ]
                elif line[ :line.find('=')-1 ] == 'selectionColour':
                    self.selectionColour = line[ line.rfind('=')+2:-1 ]
                elif line[ :line.find('=')-1 ] == 'filmVolume':
                    self.filmVolumeLevel = int( line[ line.rfind('=')+2:-1 ] )
                elif line[ :line.find('=')-1 ] == 'musicVolume':
                    self.musicVolumeLevel = int( line[ line.rfind('=')+2:-1 ] )
                elif not line.isspace( ):
                    # Malformed line: report it (messages are in Polish) and
                    # fall back to the built-in defaults.
                    print 'Niewłaściwie opisane parametry'
                    print 'Błąd w linii', line
                    self.timeGap = 1500
                    self.backgroundColour = 'white'
                    self.textColour = 'black'
                    self.scanningColour = '#E7FAFD'
                    self.selectionColour = '#9EE4EF'
                    self.filmVolumeLevel = 100
                    self.musicVolumeLevel = 40
        # Scanning state machine: which panel/row/column is highlighted.
        self.panelIteration = 0
        self.rowIteration = 0
        self.columnIteration = 0
        # Grid size used when a page has no (or a broken) sizer file.
        self.defaultNumberOfColumns = 6
        self.defaultNumberOfRows = 4
        self.countRows = 0
        self.countColumns = 0
        self.button = 1
        # How many full sweeps are made before backing out one level.
        self.countMaxRows = 2
        self.countMaxColumns = 2
        self.numberOfPresses = 0
        self.numberOfSymbol = 0
        # Current scanning level: 'panel' -> 'row' -> 'columns'.
        self.flag = 'panel'
        self.mouseCursor = PyMouse( )
        # Park the pointer in the bottom-right corner of the screen
        # (+20 because of self.winHeight -= 20 in the initializer).
        self.mousePosition = self.winWidth - 8, self.winHeight + 20 - 8
        self.mouseCursor.move( *self.mousePosition )
        self.pageFlipSounds = glob.glob( self.pathToATPlatform + 'sounds/page_flip/*' )
        mixer.init( ) # sound loading takes noticeably long
        self.pageFlipSound = mixer.Sound( self.pageFlipSounds[ 1 ] )
        self.lastPageFlipSound = mixer.Sound( self.pathToATPlatform + 'sounds/page-flip-13.wav' )
        # NOTE(review): the comprehension deliberately rebinds
        # self.pageFlipSound as its loop variable (Python 2 idiom).
        self.pageFlipSounds = [ mixer.Sound( self.pageFlipSound ) for self.pageFlipSound in self.pageFlipSounds ]
        self.SetBackgroundColour( 'black' )
#-------------------------------------------------------------------------
def initializeBitmaps(self):
dict = self.pathToATPlatform + 'symbols/*'
pages = sorted( [ item for item in glob.glob( dict ) if item[ item.rfind( '/' )+1: ].isdigit( ) ] )
self.numberOfpages = len( pages )
self.blissBook = {} #dictionary with keys as number of page and values as list of tuples (each tuple discribes one symbol) in form [bitmap, bitmap's position in sizer, bitmap's label]
self.numberOfRows, self.numberOfColumns, self.numberOfCells = [], [], []
for page in pages:
try:
pageNumber = int( page[ page.rfind( '/' )+1: ] )
except ValueError:
print 'Folderowi %s nadano nieprawidłową nazwę. Dopuszczalna jest tylko nazwa numeryczna.' % page[ page.rfind( '/' )+1: ]
pass
sizerTopology = open( page + '/sizer' )
for line in sizerTopology:
if line[ :12 ] == 'numberOfRows':
self.numberOfRows.append( int( line[ -2 ] ) )
elif line[ :15 ] == 'numberOfColumns':
self.numberOfColumns.append( int( line[ -2 ] ) )
else:
print 'Niewłaściwie opisana tablica na stronie %' % page
self.numberOfColumns.append( self.defaultNumberOfColumns )
self.numberOfRows.append( self.defaultNumberOfRows )
symbols = glob.glob( page + '/*.jpg' ) + glob.glob( page + '/*.png' ) + glob.glob( page + '/*.JPG' ) + glob.glob( page + '/*jpeg' )
symbolInfo = []
self.newHeight = 0.6*self.winHeight / self.numberOfRows[ -1 ]
for symbol in symbols:
image = wx.ImageFromStream( open( symbol, "rb" ) )
self.newWidth = image.GetSize( )[ 0 ] * ( self.newHeight / float( image.GetSize( )[ 1 ] ) )
image.Rescale( self.newWidth, self.newHeight, wx.IMAGE_QUALITY_HIGH )
bitmapSymbol = wx.BitmapFromImage( image )
symbolName = symbol[ symbol.rfind( '/' )+1 : symbol.rfind( '.' ) ]
try:
symbolPosition = int( symbolName.split( '_' )[ 0 ] )
symbolTranslate = symbolName[ symbolName.find( '_' )+1: ].replace( '_', ' ' )
symbolInfo.append( [ bitmapSymbol, symbolPosition, symbolTranslate ] )
except ValueError:
print 'Symbol %s w folderze %s ma nieprawidłową nazwę.' % ( symbolName.split( '_' )[ 0 ], page[ page.rfind( '/' )+1: ] )
pass
symbolInfo.sort( key = lambda symbolInfo: symbolInfo[ 1 ] )
self.blissBook[ pageNumber ] = symbolInfo
#-------------------------------------------------------------------------
    def createGui(self):
        """Build the main layout: a display strip on top (echoes selected
        symbols) and one hidden GridSizer page per symbol folder below it."""
        self.mainSizer = wx.BoxSizer( wx.VERTICAL )
        # Top display panel where chosen symbols are echoed back.
        self.panel = wx.Panel( self, 1, style=wx.SUNKEN_BORDER )
        self.panel.SetSizeWH( self.winWidth, 0.22*self.winHeight )
        self.panelSize = self.panel.GetSize( )
        self.displaySizer = wx.BoxSizer ( wx.HORIZONTAL | wx.LEFT )
        self.displaySizer.SetMinSize( self.panelSize )
        self.displaySizer.Fit( self.panel )
        self.displaySizer.Add( self.panel, 1, wx.EXPAND )
        self.mainSizer.Add( self.displaySizer, 1, wx.EXPAND | wx.BOTTOM | wx.TOP, border = 1 )
        self.subSizers = []
        for item in range( len( self.numberOfRows ) ):
            subSizer = wx.GridSizer( self.numberOfRows[ item ], self.numberOfColumns[ item ], 1, 1 )
            subSizer.SetMinSize( ( self.winWidth, 0.768*self.winHeight ) ) #this should not be done like this. Sizer should fit automatically.
            self.subSizers.append( subSizer )
            # i walks the page's symbols, j walks the grid cells (1-based, to
            # match the position numbers encoded in the symbol file names).
            i, j = 0, 1
            self.numberOfCells = self.numberOfRows[ item ] * self.numberOfColumns[ item ]
            while j <= self.numberOfCells:
                try:
                    if i < len( self.blissBook[ item ] ):
                        # Pad with empty buttons until the next symbol's slot.
                        # NOTE(review): a position number outside the grid
                        # would loop here indefinitely — verify input data.
                        while j != self.blissBook[ item ][ i ][ 1 ]:
                            b = bt.GenButton( self, -1 )
                            b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
                            b.SetBackgroundColour( self.backgroundColour )
                            self.subSizers[ item ].Add( b, 0, wx.EXPAND | wx.ALIGN_CENTER )
                            j += 1
                        b = bt.GenBitmapButton( self , -1 , bitmap = self.blissBook[ item ][ i ][ 0 ] )
                        b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
                        b.SetBackgroundColour( self.backgroundColour )
                        self.subSizers[item].Add( b, 0, wx.EXPAND | wx.ALIGN_CENTER )
                        i += 1
                        j += 1
                    else:
                        # No symbols left on this page: fill with blanks.
                        b = bt.GenButton( self, -1 )
                        b.Bind( wx.EVT_LEFT_DOWN, self.onPress )
                        b.SetBackgroundColour( self.backgroundColour )
                        self.subSizers[item].Add( b, 0, wx.EXPAND | wx.ALIGN_CENTER )
                        j += 1
                except IndexError:
                    print 'IndexError'
                    print i, j
            self.Layout( )
            self.mainSizer.Add( self.subSizers[ item ], proportion = 0, flag=wx.EXPAND | wx.LEFT, border = 3 )
            # Only the first page starts visible; the timer flips pages.
            if item != 0:
                self.mainSizer.Show( item = self.subSizers[ item ], show = False, recursive = True )
        self.SetSizer( self.mainSizer )
#-------------------------------------------------------------------------
    def initializeTimer(self):
        """Create and start the wx.Timer that drives the scanning loop
        (fires timerUpdate every self.timeGap milliseconds)."""
        self.stoper = wx.Timer( self )
        self.Bind( wx.EVT_TIMER , self.timerUpdate , self.stoper )
        self.stoper.Start( self.timeGap )
#-------------------------------------------------------------------------
    def createBindings(self):
        """Bind window-level events (close-box goes through a confirm dialog)."""
        self.Bind( wx.EVT_CLOSE , self.OnCloseWindow )
#-------------------------------------------------------------------------
    def OnCloseWindow(self, event):
        """Confirm (in Polish) before closing; veto the close if declined."""
        # Move the pointer near screen centre so the dialog is reachable
        # (+20 because of self.winHeight -= 20 in the initializer).
        self.mousePosition = self.winWidth/1.85, (self.winHeight+20)/1.85
        self.mouseCursor.move( *self.mousePosition )
        dial = wx.MessageDialog(None, 'Czy napewno chcesz wyjść z programu?', 'Wyjście',
                                wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION | wx.STAY_ON_TOP)
        ret = dial.ShowModal()
        if ret == wx.ID_YES:
            # Standalone run destroys only this frame; embedded run also
            # tears down the parent AT-Platform window.
            if __name__ == '__main__':
                self.Destroy()
            else:
                self.parent.Destroy( )
                self.Destroy( )
        else:
            event.Veto()
            # Park the pointer back in the bottom-right corner.
            self.mousePosition = self.winWidth - 8, self.winHeight + 20 - 8
            self.mouseCursor.move( *self.mousePosition )
#-------------------------------------------------------------------------
def onExit(self):
if __name__ == '__main__':
self.stoper.Stop( )
self.Destroy( )
else:
self.stoper.Stop( )
self.MakeModal( False )
self.parent.Show( True )
self.parent.stoper.Start( self.parent.timeGap )
self.Destroy( )
#-------------------------------------------------------------------------
    def onPress(self, event):
        """Switch-press handler: descend one scanning level
        (panel -> row -> column) or, at the column level, select the symbol —
        echo it on the display strip and speak it via milena_say."""
        # Debounce: only the first press per timer tick is honoured.
        # NOTE(review): numberOfPresses is reset to 0 by timerUpdate.
        if self.numberOfPresses == 0:
            if self.flag == 'panel':
                # The last page's first symbol labelled 'EXIT' quits the app.
                if self.blissBook[ self.panelIteration ][ 0 ][ 2 ] == 'EXIT' and self.panelIteration == len( self.subSizers ) - 1:
                    self.onExit( )
                else:
                    # Enter row scanning: tint the whole current page.
                    items = self.subSizers[ self.panelIteration ].GetChildren( )
                    for item in items:
                        b = item.GetWindow( )
                        b.SetBackgroundColour( self.scanningColour )
                        b.SetFocus( )
                    self.flag = 'row'
                    self.rowIteration = 0
            elif self.flag == 'row':
                # The timer already advanced past the row the user saw,
                # so step back one before highlighting it.
                self.rowIteration -= 1
                buttonsToHighlight = range( ( self.rowIteration ) * self.numberOfColumns[ self.panelIteration ], ( self.rowIteration ) * self.numberOfColumns[ self.panelIteration ] + self.numberOfColumns[ self.panelIteration ] )
                for button in buttonsToHighlight:
                    item = self.subSizers[ self.panelIteration ].GetItem( button )
                    b = item.GetWindow( )
                    b.SetBackgroundColour( self.selectionColour )
                    b.SetFocus( )
                self.flag = 'columns'
                self.columnIteration = 0
            elif self.flag == 'columns':
                # Undo the timer's look-ahead, then resolve the chosen cell.
                self.columnIteration -= 1
                item = self.subSizers[ self.panelIteration ].GetItem( ( self.rowIteration ) * self.numberOfColumns[ self.panelIteration ] + self.columnIteration )
                selectedButton = item.GetWindow( )
                selectedButton.SetBackgroundColour( self.selectionColour )
                selectedButton.SetFocus( )
                self.Update( )
                # Find the symbol stored at the selected grid position
                # (positions in blissBook are 1-based).
                for item in self.blissBook[ self.panelIteration ]:
                    if item[ 1 ] == self.rowIteration * self.numberOfColumns[ self.panelIteration ] + self.columnIteration + 1:
                        self.bitmapSize = item[ 0 ].GetSize( )
                        # Shrink the bitmap if it is taller than ~70% of the
                        # display strip.
                        if self.bitmapSize[ 1 ] > 0.7 * self.panelSize[ 1 ]:
                            image = wx.ImageFromBitmap( item[ 0 ] )
                            rescaleImage = image.Rescale( ( 0.7 * self.panelSize[ 1 ] / self.bitmapSize[ 1 ] ) * self.bitmapSize[ 0 ], 0.7 * self.panelSize[ 1 ], wx.IMAGE_QUALITY_HIGH )
                            rescaleItem = wx.BitmapFromImage( image )
                            b = GenSymbolTextButton( self , -1 , bitmap = rescaleItem, label = item[ 2 ] )
                        else:
                            b = GenSymbolTextButton( self , -1 , bitmap = item[ 0 ], label = item[ 2 ] )
                        b.SetFont( wx.Font( 21, wx.FONTFAMILY_ROMAN, wx.FONTWEIGHT_LIGHT, False ) )
                        b.SetBackgroundColour( self.backgroundColour )
                        self.displaySizer.Add( b, 0, flag = wx.EXPAND | wx.BOTTOM | wx.TOP | wx.ALIGN_LEFT, border = 2 )
                        self.displaySizer.Layout( )
                        unicodeLabel = item[ 2 ].decode( 'utf-8' )
                        self.lastTextLenght = len( unicodeLabel ) + 1
                        time.sleep( 0.5 )
                        # Speak the symbol's label through the milena TTS.
                        os.system( 'milena_say %s' % item[ 2 ] )
                        self.numberOfSymbol += 1
                # Selection done: restart scanning from the panel level.
                selectedButton.SetBackgroundColour( self.backgroundColour )
                self.flag = 'panel'
                self.panelIteration = 0
                self.rowIteration = 0
                self.columnIteration = 0
                # NOTE(review): self.count is not read elsewhere in view.
                self.count = 0
        self.numberOfPresses += 1
#-------------------------------------------------------------------------
    def timerUpdate(self , event):
        """Scanning tick: advance the highlight at the current level
        (panel page / row / column) and announce the step via milena_say.
        After countMax* full sweeps, back out to the level above."""
        # Re-park the pointer and re-arm the press debounce every tick.
        self.mouseCursor.move( *self.mousePosition )
        self.numberOfPresses = 0
        if self.flag == 'panel': ## flag == panel ie. switching between panels
            self.panelIteration += 1
            if self.panelIteration == len( self.blissBook ):
                self.panelIteration = 0
            # The last page gets a distinctive flip sound.
            if self.panelIteration == len( self.blissBook ) - 1:
                self.lastPageFlipSound.play( )
            else:
                self.pageFlipSounds[ self.panelIteration % len( self.pageFlipSounds ) ].play( )
            # Show only the current page.
            for item in range( len( self.blissBook ) ):
                if item != self.panelIteration:
                    self.mainSizer.Show( item = self.subSizers[ item ], show = False, recursive = True )
            self.mainSizer.Show( item = self.subSizers[ self.panelIteration ], show = True, recursive = True )
            self.SetSizer( self.mainSizer )
            self.Layout( )
        if self.flag == 'row': #flag == row ie. switching between rows
            # After countMaxRows full sweeps with no press, back out to
            # panel scanning.
            if self.countRows == self.countMaxRows:
                self.flag = 'panel'
                self.countRows = 0
                items = self.subSizers[ self.panelIteration ].GetChildren( )
                for item in items:
                    b = item.GetWindow( )
                    b.SetBackgroundColour( self.backgroundColour )
                    b.SetFocus( )
            else:
                if self.rowIteration == self.numberOfRows[ self.panelIteration ]:
                    self.rowIteration = 0
                if self.rowIteration == self.numberOfRows[ self.panelIteration ] - 1:
                    self.countRows += 1
                # Clear the previous highlight, then tint the current row.
                items = self.subSizers[ self.panelIteration ].GetChildren( )
                for item in items:
                    b = item.GetWindow( )
                    b.SetBackgroundColour( self.backgroundColour )
                    b.SetFocus( )
                # 'zakres' is Polish for 'range'.
                zakres = range( self.rowIteration * self.numberOfColumns[ self.panelIteration ], self.rowIteration * self.numberOfColumns[ self.panelIteration ] + self.numberOfColumns[ self.panelIteration ] )
                for i in zakres:
                    item = self.subSizers[ self.panelIteration ].GetItem( i )
                    b = item.GetWindow( )
                    b.SetBackgroundColour( self.scanningColour )
                    b.SetFocus( )
                self.rowIteration += 1
                os.system( 'milena_say %i' % ( self.rowIteration ) )
        elif self.flag == 'columns': #flag = columns ie. switching between cells in the particular row
            # After countMaxColumns sweeps with no press, back out to rows.
            if self.countColumns == self.countMaxColumns:
                self.flag = 'row'
                self.rowIteration = 0
                self.columnIteration = 0
                self.countColumns = 0
                self.countRows = 0
                items = self.subSizers[ self.panelIteration ].GetChildren( )
                for item in items:
                    b = item.GetWindow( )
                    b.SetBackgroundColour( self.backgroundColour )
                    b.SetFocus( )
            else:
                if self.columnIteration == self.numberOfColumns[ self.panelIteration ]:
                    self.columnIteration = 0
                if self.columnIteration == self.numberOfColumns[ self.panelIteration ] - 1:
                    self.countColumns += 1
                # Clear the previous highlight, then tint the current cell.
                items = self.subSizers[ self.panelIteration ].GetChildren( )
                for item in items:
                    b = item.GetWindow( )
                    b.SetBackgroundColour( self.backgroundColour )
                    b.SetFocus( )
                item = self.subSizers[ self.panelIteration ].GetItem( self.rowIteration * self.numberOfColumns[ self.panelIteration ] + self.columnIteration )
                b = item.GetWindow( )
                b.SetBackgroundColour( self.scanningColour )
                b.SetFocus( )
                self.columnIteration += 1
                os.system( 'milena_say %i' % ( self.columnIteration ) )
#=============================================================================
if __name__ == '__main__':
    # Standalone entry point: create the app, show the board, run the loop.
    application = wx.PySimpleApp( )
    mainFrame = bliss_symbols( parent = None, id = -1 )
    mainFrame.Show( )
    application.MainLoop( )
| BrainTech/pre-pisak | modules/others/symbols.py | Python | gpl-3.0 | 23,745 |
class Amiibro:
    """Dispatch registry mapping amiibo tag hex IDs to handler callbacks."""

    def __init__(self):
        # Registry of known tags:
        # 'hex' -> {'name': str, 'method': callable, 'params': arg or None}
        self._amiibos = {}

    def handleTag(self, hex=None):
        """Look up *hex* in the registry, invoke its handler, return its name.

        Returns None for unknown tags (previously an implicit fall-through).
        NOTE: the parameter name shadows the builtin ``hex``; it is kept for
        backward compatibility with keyword callers.
        """
        entry = self._amiibos.get(hex)
        if entry is None:
            return None
        print(entry['name'])
        # Falsy 'params' (None, empty) means the handler takes no argument.
        if entry['params']:
            entry['method'](entry['params'])
        else:
            entry['method']()
        return entry['name']
| pgum/haus | hauser/amiibro.py | Python | gpl-3.0 | 501 |
# -*- coding=utf-8 -*-
import os
from setuptools import setup, find_packages
from version import get_version
version = get_version()

# Package metadata for edem.content.logo (logos for forums.e-democracy.org).
setup(name='edem.content.logo',
      version=version,
      description="Logos for forums.e-democracy.org",
      long_description=open("README.txt").read() + "\n" +
      open(os.path.join("docs", "HISTORY.txt")).read(),
      # See https://pypi.python.org/pypi?%3Aaction=list_classifiers for values
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Environment :: Web Environment",
          "Framework :: Zope2",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
          "Natural Language :: English",
          # BUG FIX: the comma after the Linux classifier was missing, so
          # implicit string concatenation silently produced one bogus entry
          # "Operating System :: POSIX :: LinuxProgramming Language :: Python".
          "Operating System :: POSIX :: Linux",
          "Programming Language :: Python",
          "Topic :: Software Development :: Libraries :: Python Modules",
      ],
      keywords='',
      author='Bill Bushey',
      author_email='bill.bushey@e-democracy.org',
      url='http://www.e-democracy.org/',
      license='GPL 3',
      packages=find_packages(exclude=['ez_setup']),
      namespace_packages=['edem', 'edem.content'],
      include_package_data=True,
      zip_safe=True,
      install_requires=[
          'setuptools',
          'edem.skin',
          # -*- Extra requirements: -*-
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,)
| e-democracy/edem.content.logo | setup.py | Python | gpl-3.0 | 1,382 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
#
# This file is a module for Ansible that interacts with Network Manager
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION='''
---
module: nmcli
author: Chris Long
short_description: Manage Networking
requirements: [ nmcli, dbus ]
version_added: "2.0"
description:
- Manage the network devices. Create, modify, and manage, ethernet, teams, bonds, vlans etc.
options:
state:
required: True
choices: [ present, absent ]
description:
- Whether the device should exist or not, taking action if the state is different from what is stated.
autoconnect:
required: False
default: "yes"
choices: [ "yes", "no" ]
description:
- Whether the connection should start on boot.
- Whether the connection profile can be automatically activated
conn_name:
required: True
description:
- 'Where conn_name will be the name used to call the connection. when not provided a default name is generated: <type>[-<ifname>][-<num>]'
ifname:
required: False
default: conn_name
description:
- Where IFNAME will be the what we call the interface name.
- interface to bind the connection to. The connection will only be applicable to this interface name.
- A special value of "*" can be used for interface-independent connections.
- The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
type:
required: False
choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ]
description:
- This is the type of device or network connection that you wish to create.
mode:
required: False
choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ]
default: balance-rr
description:
- This is the type of device or network connection that you wish to create for a bond, team or bridge.
master:
required: False
default: None
description:
- master <master (ifname, or connection UUID or conn_name) of bridge, team, bond master connection profile.
ip4:
required: False
default: None
description:
- 'The IPv4 address to this interface using this format ie: "192.168.1.24/24"'
gw4:
required: False
description:
- 'The IPv4 gateway for this interface using this format ie: "192.168.100.1"'
dns4:
required: False
default: None
description:
- 'A list of up to 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["8.8.8.8 8.8.4.4"]'
ip6:
required: False
default: None
description:
- 'The IPv6 address to this interface using this format ie: "abbe::cafe"'
gw6:
required: False
default: None
description:
- 'The IPv6 gateway for this interface using this format ie: "2001:db8::1"'
dns6:
required: False
description:
- 'A list of up to 3 dns servers, ipv6 format e.g. To add two IPv6 DNS server addresses: ["2001:4860:4860::8888 2001:4860:4860::8844"]'
mtu:
required: False
default: 1500
description:
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
primary:
required: False
default: None
description:
- This is only used with bond and is the primary interface name (for "active-backup" mode), this is the usually the 'ifname'
miimon:
required: False
default: 100
description:
- This is only used with bond - miimon
downdelay:
required: False
default: None
description:
- This is only used with bond - downdelay
updelay:
required: False
default: None
description:
- This is only used with bond - updelay
arp_interval:
required: False
default: None
description:
- This is only used with bond - ARP interval
arp_ip_target:
required: False
default: None
description:
- This is only used with bond - ARP IP target
stp:
required: False
default: None
description:
- This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge
priority:
required: False
default: 128
description:
- This is only used with 'bridge' - sets STP priority
forwarddelay:
required: False
default: 15
description:
- This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds
hellotime:
required: False
default: 2
description:
- This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds
maxage:
required: False
default: 20
description:
- This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds
ageingtime:
required: False
default: 300
description:
- This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds
mac:
required: False
default: None
description:
- 'This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)'
slavepriority:
required: False
default: 32
description:
- This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave
path_cost:
required: False
default: 100
description:
- This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave
hairpin:
required: False
default: yes
description:
- This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on.
vlanid:
required: False
default: None
description:
- This is only used with VLAN - VLAN ID in range <0-4095>
vlandev:
required: False
default: None
description:
- This is only used with VLAN - parent device this VLAN is on, can use ifname
flags:
required: False
default: None
description:
- This is only used with VLAN - flags
ingress:
required: False
default: None
description:
- This is only used with VLAN - VLAN ingress priority mapping
egress:
required: False
default: None
description:
- This is only used with VLAN - VLAN egress priority mapping
'''
EXAMPLES='''
The following examples are working examples that I have run in the field. I followed the structure:
```
|_/inventory/cloud-hosts
| /group_vars/openstack-stage.yml
| /host_vars/controller-01.openstack.host.com
| /host_vars/controller-02.openstack.host.com
|_/playbook/library/nmcli.py
| /playbook-add.yml
| /playbook-del.yml
```
## inventory examples
### groups_vars
```yml
---
#devops_os_define_network
storage_gw: "192.168.0.254"
external_gw: "10.10.0.254"
tenant_gw: "172.100.0.254"
#Team vars
nmcli_team:
- {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
- {conn_name: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
- {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
nmcli_team_slave:
- {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
- {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
- {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
- {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
#bond vars
nmcli_bond:
- {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'}
- {conn_name: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'}
- {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'}
nmcli_bond_slave:
- {conn_name: 'em1', ifname: 'em1', master: 'tenant'}
- {conn_name: 'em2', ifname: 'em2', master: 'tenant'}
- {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'}
- {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'}
#ethernet vars
nmcli_ethernet:
- {conn_name: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"}
- {conn_name: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"}
- {conn_name: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"}
- {conn_name: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"}
```
### host_vars
```yml
---
storage_ip: "192.168.160.21/23"
external_ip: "10.10.152.21/21"
tenant_ip: "192.168.200.21/23"
```
## playbook-add.yml example
```yml
---
- hosts: openstack-stage
remote_user: root
tasks:
- name: install needed network manager libs
yum: name={{ item }} state=installed
with_items:
- libnm-qt-devel.x86_64
- nm-connection-editor.x86_64
- libsemanage-python
- policycoreutils-python
##### Working with all cloud nodes - Teaming
- name: try nmcli add team - conn_name only & ip4 gw4
nmcli: type=team conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
with_items:
- "{{nmcli_team}}"
- name: try nmcli add teams-slave
nmcli: type=team-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
with_items:
- "{{nmcli_team_slave}}"
###### Working with all cloud nodes - Bonding
# - name: try nmcli add bond - conn_name only & ip4 gw4 mode
# nmcli: type=bond conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} state=present
# with_items:
# - "{{nmcli_bond}}"
#
# - name: try nmcli add bond-slave
# nmcli: type=bond-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present
# with_items:
# - "{{nmcli_bond_slave}}"
##### Working with all cloud nodes - Ethernet
# - name: nmcli add Ethernet - conn_name only & ip4 gw4
# nmcli: type=ethernet conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present
# with_items:
# - "{{nmcli_ethernet}}"
```
## playbook-del.yml example
```yml
---
- hosts: openstack-stage
remote_user: root
tasks:
- name: try nmcli del team - multiple
nmcli: conn_name={{item.conn_name}} state=absent
with_items:
- { conn_name: 'em1'}
- { conn_name: 'em2'}
- { conn_name: 'p1p1'}
- { conn_name: 'p1p2'}
- { conn_name: 'p2p1'}
- { conn_name: 'p2p2'}
- { conn_name: 'tenant'}
- { conn_name: 'storage'}
- { conn_name: 'external'}
- { conn_name: 'team-em1'}
- { conn_name: 'team-em2'}
- { conn_name: 'team-p1p1'}
- { conn_name: 'team-p1p2'}
- { conn_name: 'team-p2p1'}
- { conn_name: 'team-p2p2'}
```
# To add an Ethernet connection with static IP configuration, issue a command as follows
- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# To add an Team connection with static IP configuration, issue a command as follows
- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present autoconnect=yes
# Optionally, at the same time specify IPv6 addresses for the device as follows:
- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present
# To add two IPv4 DNS server addresses:
- nmcli: conn_name=my-eth1 dns4=["8.8.8.8", "8.8.4.4"] state=present
# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows
- nmcli: type=ethernet conn_name=my-eth1 ifname="*" state=present
# To change the property of a setting e.g. MTU, issue a command as follows:
- nmcli: conn_name=my-eth1 mtu=9000 state=present
Exit statuses:
- nmcli exits with status 0 if it succeeds, a value greater than 0 is
returned if an error occurs.
- 0 Success - indicates the operation succeeded
- 1 Unknown or unspecified error
- 2 Invalid user input, wrong nmcli invocation
- 3 Timeout expired (see --wait option)
- 4 Connection activation failed
- 5 Connection deactivation failed
- 6 Disconnecting device failed
- 7 Connection deletion failed
- 8 NetworkManager is not running
- 9 nmcli and NetworkManager versions mismatch
- 10 Connection, device, or access point does not exist.
'''
# import ansible.module_utils.basic
import os
import syslog
import sys
import dbus
from gi.repository import NetworkManager, NMClient
class Nmcli(object):
    """
    This is the generic nmcli manipulation class that is subclassed based on platform.
    A subclass may wish to override the following action methods:-
            - create_connection()
            - delete_connection()
            - modify_connection()
            - show_connection()
            - up_connection()
            - down_connection()
    All subclasses MUST define platform and distribution (which may be None).
    """

    platform='Generic'
    distribution=None
    # NOTE(review): created at class-definition (import) time, so merely
    # importing this module requires a reachable D-Bus system bus.
    bus=dbus.SystemBus()
    # The following is going to be used in dbus code:
    # NetworkManager device-type codes -> human-readable names.
    DEVTYPES={1: "Ethernet",
              2: "Wi-Fi",
              5: "Bluetooth",
              6: "OLPC",
              7: "WiMAX",
              8: "Modem",
              9: "InfiniBand",
              10: "Bond",
              11: "VLAN",
              12: "ADSL",
              13: "Bridge",
              14: "Generic",
              15: "Team"
              }
    # NetworkManager device-state codes -> human-readable names.
    STATES={0: "Unknown",
            10: "Unmanaged",
            20: "Unavailable",
            30: "Disconnected",
            40: "Prepare",
            50: "Config",
            60: "Need Auth",
            70: "IP Config",
            80: "IP Check",
            90: "Secondaries",
            100: "Activated",
            110: "Deactivating",
            120: "Failed"
            }
def __init__(self, module):
self.module=module
self.state=module.params['state']
self.autoconnect=module.params['autoconnect']
self.conn_name=module.params['conn_name']
self.master=module.params['master']
self.ifname=module.params['ifname']
self.type=module.params['type']
self.ip4=module.params['ip4']
self.gw4=module.params['gw4']
self.dns4=module.params['dns4']
self.ip6=module.params['ip6']
self.gw6=module.params['gw6']
self.dns6=module.params['dns6']
self.mtu=module.params['mtu']
self.stp=module.params['stp']
self.priority=module.params['priority']
self.mode=module.params['mode']
self.miimon=module.params['miimon']
self.downdelay=module.params['downdelay']
self.updelay=module.params['updelay']
self.arp_interval=module.params['arp_interval']
self.arp_ip_target=module.params['arp_ip_target']
self.slavepriority=module.params['slavepriority']
self.forwarddelay=module.params['forwarddelay']
self.hellotime=module.params['hellotime']
self.maxage=module.params['maxage']
self.ageingtime=module.params['ageingtime']
self.mac=module.params['mac']
self.vlanid=module.params['vlanid']
self.vlandev=module.params['vlandev']
self.flags=module.params['flags']
self.ingress=module.params['ingress']
self.egress=module.params['egress']
# select whether we dump additional debug info through syslog
self.syslogging=True
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def merge_secrets(self, proxy, config, setting_name):
try:
# returns a dict of dicts mapping name::setting, where setting is a dict
# mapping key::value. Each member of the 'setting' dict is a secret
secrets=proxy.GetSecrets(setting_name)
# Copy the secrets into our connection config
for setting in secrets:
for key in secrets[setting]:
config[setting_name][key]=secrets[setting][key]
except Exception, e:
pass
    def dict_to_string(self, d):
        # Try to trivially translate a dictionary's elements into nice string
        # formatting.
        # Values are dbus wrapper types, hence the type() comparisons against
        # freshly constructed dbus instances below.
        dstr=""
        for key in d:
            val=d[key]
            str_val=""
            add_string=True
            if type(val)==type(dbus.Array([])):
                # Arrays: bytes become space-separated ints, strings are
                # concatenated; other element types are silently skipped.
                for elt in val:
                    if type(elt)==type(dbus.Byte(1)):
                        str_val+="%s " % int(elt)
                    elif type(elt)==type(dbus.String("")):
                        str_val+="%s" % elt
            elif type(val)==type(dbus.Dictionary({})):
                # Nested settings dict: recurse, and suppress the enclosing
                # "key: value" line for this entry.
                dstr+=self.dict_to_string(val)
                add_string=False
            else:
                str_val=val
            if add_string:
                dstr+="%s: %s\n" % ( key, str_val)
        return dstr
def connection_to_string(self, config):
# dump a connection configuration to use in list_connection_info
setting_list=[]
for setting_name in config:
setting_list.append(self.dict_to_string(config[setting_name]))
return setting_list
# print ""
    def list_connection_info(self):
        # Ask the settings service for the list of connections it provides
        bus=dbus.SystemBus()
        service_name="org.freedesktop.NetworkManager"
        proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
        settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
        connection_paths=settings.ListConnections()
        connection_list=[]
        # List each connection's name, UUID, and type
        # NOTE(review): the result is one flat list interleaving id, uuid, type
        # and stringified settings for every connection -- not one entry per
        # connection.  connection_exists() relies on the names being members.
        for path in connection_paths:
            con_proxy=bus.get_object(service_name, path)
            settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
            config=settings_connection.GetSettings()
            # Now get secrets too; we grab the secrets for each type of connection
            # (since there isn't a "get all secrets" call because most of the time
            # you only need 'wifi' secrets or '802.1x' secrets, not everything) and
            # merge that into the configuration data - To use at a later stage
            self.merge_secrets(settings_connection, config, '802-11-wireless')
            self.merge_secrets(settings_connection, config, '802-11-wireless-security')
            self.merge_secrets(settings_connection, config, '802-1x')
            self.merge_secrets(settings_connection, config, 'gsm')
            self.merge_secrets(settings_connection, config, 'cdma')
            self.merge_secrets(settings_connection, config, 'ppp')
            # Get the details of the 'connection' setting
            s_con=config['connection']
            connection_list.append(s_con['id'])
            connection_list.append(s_con['uuid'])
            connection_list.append(s_con['type'])
            connection_list.append(self.connection_to_string(config))
        return connection_list
def connection_exists(self):
# we are going to use name and type in this instance to find if that connection exists and is of type x
connections=self.list_connection_info()
for con_item in connections:
if self.conn_name==con_item:
return True
def down_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# if self.connection_exists():
cmd.append('con')
cmd.append('down')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def up_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('up')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def create_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('team')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def modify_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw4)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
# Can't use MTU with team
return cmd
def create_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append(self.type)
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
# if self.mtu is not None:
# cmd.append('802-3-ethernet.mtu')
# cmd.append(self.mtu)
return cmd
def modify_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
return cmd
def create_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('bond')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
if self.mode is not None:
cmd.append('mode')
cmd.append(self.mode)
if self.miimon is not None:
cmd.append('miimon')
cmd.append(self.miimon)
if self.downdelay is not None:
cmd.append('downdelay')
cmd.append(self.downdelay)
if self.downdelay is not None:
cmd.append('updelay')
cmd.append(self.updelay)
if self.downdelay is not None:
cmd.append('arp-interval')
cmd.append(self.arp_interval)
if self.downdelay is not None:
cmd.append('arp-ip-target')
cmd.append(self.arp_ip_target)
return cmd
def modify_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw4)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def create_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append('bond-slave')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
return cmd
def modify_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
return cmd
def create_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('ethernet')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def modify_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw4)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.autoconnect)
return cmd
def create_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bridge interface
return cmd
def modify_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bridge interface
return cmd
    def create_connection_vlan(self):
        cmd=[self.module.get_bin_path('nmcli', True)]
        # format for creating vlan interface -- not implemented yet, only the
        # bare nmcli invocation is returned.  (Comment previously said
        # "ethernet", a copy-paste slip.)
        return cmd
    def modify_connection_vlan(self):
        cmd=[self.module.get_bin_path('nmcli', True)]
        # format for modifying vlan interface -- not implemented yet, only the
        # bare nmcli invocation is returned.  (Comment previously said
        # "ethernet", a copy-paste slip.)
        return cmd
def create_connection(self):
cmd=[]
if self.type=='team':
# cmd=self.create_connection_team()
if (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_team()
self.execute_command(cmd)
cmd=self.modify_connection_team()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
elif (self.dns4 is None) or (self.dns6 is None):
cmd=self.create_connection_team()
return self.execute_command(cmd)
elif self.type=='team-slave':
if self.mtu is not None:
cmd=self.create_connection_team_slave()
self.execute_command(cmd)
cmd=self.modify_connection_team_slave()
self.execute_command(cmd)
# cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_team_slave()
return self.execute_command(cmd)
elif self.type=='bond':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_bond()
self.execute_command(cmd)
cmd=self.modify_connection_bond()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_bond()
return self.execute_command(cmd)
elif self.type=='bond-slave':
cmd=self.create_connection_bond_slave()
elif self.type=='ethernet':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_ethernet()
self.execute_command(cmd)
cmd=self.modify_connection_ethernet()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_ethernet()
return self.execute_command(cmd)
elif self.type=='bridge':
cmd=self.create_connection_bridge()
elif self.type=='vlan':
cmd=self.create_connection_vlan()
return self.execute_command(cmd)
def remove_connection(self):
# self.down_connection()
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('del')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def modify_connection(self):
cmd=[]
if self.type=='team':
cmd=self.modify_connection_team()
elif self.type=='team-slave':
cmd=self.modify_connection_team_slave()
elif self.type=='bond':
cmd=self.modify_connection_bond()
elif self.type=='bond-slave':
cmd=self.modify_connection_bond_slave()
elif self.type=='ethernet':
cmd=self.modify_connection_ethernet()
elif self.type=='bridge':
cmd=self.modify_connection_bridge()
elif self.type=='vlan':
cmd=self.modify_connection_vlan()
return self.execute_command(cmd)
def main():
    """Ansible module entry point: validate parameters and converge the named
    connection to the requested state ('present' or 'absent')."""
    # Parsing argument file
    module=AnsibleModule(
        argument_spec=dict(
            autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'),
            state=dict(required=True, choices=['present', 'absent'], type='str'),
            conn_name=dict(required=True, type='str'),
            master=dict(required=False, default=None, type='str'),
            ifname=dict(required=False, default=None, type='str'),
            type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'),
            ip4=dict(required=False, default=None, type='str'),
            gw4=dict(required=False, default=None, type='str'),
            dns4=dict(required=False, default=None, type='str'),
            ip6=dict(required=False, default=None, type='str'),
            gw6=dict(required=False, default=None, type='str'),
            dns6=dict(required=False, default=None, type='str'),
            # Bond Specific vars
            # BUG FIX: 'require' was a typo for 'required'.
            mode=dict(required=False, default="balance-rr", choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb"], type='str'),
            miimon=dict(required=False, default=None, type='str'),
            downdelay=dict(required=False, default=None, type='str'),
            updelay=dict(required=False, default=None, type='str'),
            arp_interval=dict(required=False, default=None, type='str'),
            arp_ip_target=dict(required=False, default=None, type='str'),
            # general usage
            mtu=dict(required=False, default=None, type='str'),
            mac=dict(required=False, default=None, type='str'),
            # bridge specific vars
            stp=dict(required=False, default='yes', choices=['yes', 'no'], type='str'),
            priority=dict(required=False, default="128", type='str'),
            slavepriority=dict(required=False, default="32", type='str'),
            forwarddelay=dict(required=False, default="15", type='str'),
            hellotime=dict(required=False, default="2", type='str'),
            maxage=dict(required=False, default="20", type='str'),
            ageingtime=dict(required=False, default="300", type='str'),
            # vlan specific vars
            vlanid=dict(required=False, default=None, type='str'),
            vlandev=dict(required=False, default=None, type='str'),
            flags=dict(required=False, default=None, type='str'),
            ingress=dict(required=False, default=None, type='str'),
            egress=dict(required=False, default=None, type='str'),
        ),
        supports_check_mode=True
    )

    nmcli=Nmcli(module)

    rc=None
    out=''
    err=''
    result={}
    result['conn_name']=nmcli.conn_name
    result['state']=nmcli.state

    # check for issues
    if nmcli.conn_name is None:
        nmcli.module.fail_json(msg="You haven't specified a name for the connection")
    # team-slave checks
    if nmcli.type=='team-slave' and nmcli.master is None:
        nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing")
    if nmcli.type=='team-slave' and nmcli.ifname is None:
        nmcli.module.fail_json(msg="You haven't specified a name for the connection")

    if nmcli.state=='absent':
        if nmcli.connection_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err)=nmcli.down_connection()
            (rc, out, err)=nmcli.remove_connection()
            if rc!=0:
                module.fail_json(name =('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)

    elif nmcli.state=='present':
        # Hoisted: connection_exists() performs a full D-Bus enumeration of all
        # connections, so do it once instead of once per branch.
        exists=nmcli.connection_exists()
        if exists:
            # modify connection (note: this function is check mode aware)
            result['Exists']='Connections do exist so we are modifying them'
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err)=nmcli.modify_connection()
        else:
            result['Connection']=('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err)=nmcli.create_connection()
        if rc is not None and rc!=0:
            module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)

    # rc stays None only when no nmcli command was run.
    if rc is None:
        result['changed']=False
    else:
        result['changed']=True
    if out:
        result['stdout']=out
    if err:
        result['stderr']=err

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
# Module entry point: parse the parameters and apply the requested state.
main()
| wehkamp/ansible-modules-extras | network/nmcli.py | Python | gpl-3.0 | 40,826 |
#!/usr/bin/env python
"""Read the Google Weather API and emit data in the usual format.
Write a sample every 5 minutes on stdout, log on stderr.
"""
import sys
import time
from urllib import quote_plus
from urllib2 import urlopen
from datetime import datetime, timedelta
from xml.etree import cElementTree as ET
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def main():
    # Poll the (long since retired) Google Weather API for the location given
    # on the command line, printing a "YYYY-MM-DD HH:MM:SS <temp_c>" sample on
    # stdout every 5 minutes, forever.
    location = sys.argv[1]
    url = ("http://www.google.com/ig/api?weather="
        + quote_plus(location))
    while 1:
        ts = datetime.now()
        try:
            et = ET.parse(urlopen(url))
            # Drill into the reply XML: weather/current_conditions/temp_c@data
            value = float(et.getroot()
                .find('weather')
                .find('current_conditions')
                .find('temp_c')
                .attrib['data'])
        except Exception, e:
            # Network or parse failure: log it and retry on the next cycle.
            logger.error("error reading: %s", e)
        else:
            print ts.strftime("%Y-%m-%d %H:%M:%S"), value
        time.sleep(5 * 60)
if __name__ == '__main__':
sys.exit(main())
| dvarrazzo/arduino | thermo/client/google_api.py | Python | gpl-3.0 | 1,030 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import division
from __future__ import print_function
'''
Collected utilities for pygame
It is difficult to write pixels directly in python.
There's some way to get a framebuffer back from Tk, but it is
cumberosme.
The module pygame supports sending pixel buffers,
which is wrapped for convneinece in this module.
example usage
import neurotools.graphics.pygame as npg
import time
import numpy as np
import pygame
K = 128
screen = npg.start(K,K,'Image data')
dt = 1/20
wait_til = time.time() + dt
print('Animating..')
for i in neurotools.tools.progress_bar(range(100)):
t = time.time()
if t<wait_til: time.sleep(wait_til-t)
wait_til = t + dt
npg.draw_array(screen, np.random.rand(K,K,3))
pygame.quit()
'''
import sys
import numpy as np
try:
import pygame as pg
except:
print('pygame package is missing; it is obsolete so this is not unusual')
print('pygame graphics will not work')
pg = None
def enable_vsync():
    """Ask the current macOS OpenGL context to sync buffer swaps to the
    display refresh.

    No-op on non-Darwin platforms.  Failures (missing library, no current GL
    context) are reported on stdout and otherwise ignored.
    """
    if sys.platform != 'darwin':
        return
    try:
        import ctypes
        import ctypes.util
        ogl = ctypes.cdll.LoadLibrary(ctypes.util.find_library("OpenGL"))
        # set v to 1 to enable vsync, 0 to disable vsync
        v = ctypes.c_int(1)
        # 222 is the raw value of kCGLCPSwapInterval in the CGL headers.
        ogl.CGLSetParameter(ogl.CGLGetCurrentContext(), ctypes.c_int(222), ctypes.pointer(v))
    except Exception:
        # BUG FIX: was a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt.
        print("Unable to set vsync mode, using driver defaults")
def start(W,H,name='untitled'):
    '''
    Open (or re-open) a W x H pygame window titled *name* and return it.
    Any previously initialised pygame state is torn down first.
    '''
    # Get things going
    pg.quit()
    pg.init()
    enable_vsync()
    window = pg.display.set_mode((W,H))
    pg.display.set_caption(name)
    return window
def draw_array(screen,rgbdata,doshow=True):
    '''
    Send array data to a PyGame window.
    PyGame is BRG order which is unusual -- reorder it.
    Parameters
    ----------
    screen : object
        Object returned by neurotools.graphics.pygame.start
    rgbdata :
        RGB image data with color values in [0,1]
    doshow : bool, default True
        If True, update the pygame display after blitting.
    '''
    # Cast to int
    rgbdata = np.int32(rgbdata*255)
    # clip bytes to 0..255 range
    rgbdata[rgbdata<0]=0
    rgbdata[rgbdata>255]=255
    # get color dimension
    if len(rgbdata.shape)==3:
        w,h,d = rgbdata.shape
    else:
        w,h = rgbdata.shape
        d=1
    # repack color data in screen format
    draw = np.zeros((w,h,4),'uint8')
    if d==1:
        # Grayscale: replicate the single channel into R, G and B.
        draw[...,0]=rgbdata
        draw[...,1]=rgbdata
        draw[...,2]=rgbdata
        draw[...,3]=255 # alpha channel
    if d==3:
        # RGB input: reverse the channel order for the surface, opaque alpha.
        draw[...,:3]=rgbdata[...,::-1]
        draw[...,-1]=255 # alpha channel
    if d==4:
        # RGBA input: reverse the three color channels, keep alpha as given.
        draw[...,:3]=rgbdata[...,-2::-1]
        draw[...,-1]=rgbdata[...,-1]
    # get surface and copy data to sceeen
    surface = pg.Surface((w,h))
    # NOTE(review): np.frombuffer defaults to float64 here; the assignment works
    # because both views alias the same underlying bytes, but it relies on the
    # surface buffer and `draw` having identical byte counts (a multiple of 8)
    # -- confirm for unusual surface formats.
    numpy_surface = np.frombuffer(surface.get_buffer())
    numpy_surface[...] = np.frombuffer(draw)
    del numpy_surface
    screen.blit(surface,(0,0))
    if doshow:
        pg.display.update()
| michaelerule/neurotools | graphics/pygame.py | Python | gpl-3.0 | 3,004 |
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension of the :class:`~matplotlib.axes.Axes` class with
user-friendly attributes
"""
from six import string_types
from matplotlib.axes import Axes as _Axes
from matplotlib.artist import Artist
from matplotlib.projections import register_projection
from .decorators import auto_refresh
from . import (rcParams, tex, html)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
class Axes(_Axes):
    """An extension of the core matplotlib :class:`~matplotlib.axes.Axes`.

    These custom `Axes` provide only some simpler attribute accessors.

    Notes
    -----
    A new set of `Axes` should be constructed via::

        >>> plot.add_subplots(111, projection='xxx')

    where plot is a :class:`~gwpy.plotter.Plot` figure, and ``'xxx'``
    is the name of the `Axes` you want to add.
    """
    projection = 'rectilinear'

    def __init__(self, *args, **kwargs):
        super(Axes, self).__init__(*args, **kwargs)
        # Push the x-axis label slightly away from the tick labels.
        self.xaxis.labelpad = 10
    __init__.__doc__ = _Axes.__init__.__doc__

    # -----------------------------------------------
    # text properties

    # x-axis label
    @property
    def xlabel(self):
        """Label for the x-axis

        :type: :class:`~matplotlib.text.Text`
        """
        return self.xaxis.label

    @xlabel.setter
    @auto_refresh
    def xlabel(self, text):
        # Accept either a plain string or a ready-made Text artist.
        if isinstance(text, string_types):
            self.set_xlabel(text)
        else:
            self.xaxis.label = text

    @xlabel.deleter
    @auto_refresh
    def xlabel(self):
        self.set_xlabel("")

    # y-axis label
    @property
    def ylabel(self):
        """Label for the y-axis

        :type: :class:`~matplotlib.text.Text`
        """
        return self.yaxis.label

    @ylabel.setter
    @auto_refresh
    def ylabel(self, text):
        if isinstance(text, string_types):
            self.set_ylabel(text)
        else:
            self.yaxis.label = text

    @ylabel.deleter
    @auto_refresh
    def ylabel(self):
        self.set_ylabel("")

    # -----------------------------------------------
    # limit properties

    @property
    def xlim(self):
        """Limits for the x-axis

        :type: `tuple`
        """
        return self.get_xlim()

    @xlim.setter
    @auto_refresh
    def xlim(self, limits):
        self.set_xlim(*limits)

    @xlim.deleter
    @auto_refresh
    def xlim(self):
        # Deleting the limits re-enables x autoscaling from the data.
        self.relim()
        self.autoscale_view(scalex=True, scaley=False)

    @property
    def ylim(self):
        """Limits for the y-axis

        :type: `tuple`
        """
        return self.get_ylim()

    @ylim.setter
    @auto_refresh
    def ylim(self, limits):
        self.set_ylim(*limits)

    @ylim.deleter
    @auto_refresh  # FIX (consistency): xlim.deleter auto-refreshes; ylim.deleter previously did not
    def ylim(self):
        self.relim()
        self.autoscale_view(scalex=False, scaley=True)

    # -----------------------------------------------
    # scale properties

    @property
    def logx(self):
        """Display the x-axis with a logarithmic scale

        :type: `bool`
        """
        return self.get_xscale() == "log"

    @logx.setter
    @auto_refresh
    def logx(self, log):
        # Only touch the scale when the requested state differs.
        if log and not self.logx:
            self.set_xscale('log')
        elif self.logx and not log:
            self.set_xscale('linear')

    @property
    def logy(self):
        """Display the y-axis with a logarithmic scale

        :type: `bool`
        """
        return self.get_yscale() == "log"

    @logy.setter
    @auto_refresh
    def logy(self, log):
        if log and not self.logy:
            self.set_yscale('log')
        elif self.logy and not log:
            self.set_yscale('linear')

    # -------------------------------------------
    # Axes methods

    @auto_refresh
    def resize(self, pos, which='both'):
        """Set the axes position with::

            pos = [left, bottom, width, height]

        in relative 0,1 coords, or *pos* can be a
        :class:`~matplotlib.transforms.Bbox`

        There are two position variables: one which is ultimately
        used, but which may be modified by :meth:`apply_aspect`, and a
        second which is the starting point for :meth:`apply_aspect`.
        """
        return super(Axes, self).set_position(pos, which=which)

    @auto_refresh
    def add_label_unit(self, unit, axis='x'):
        """Append (or set) a ``[unit]`` suffix on the given axis label."""
        label = getattr(self, 'get_%slabel' % axis)()
        if not label:
            # Fall back on the unit's own docstring when no label is set.
            label = unit.__doc__
        # Render the unit with LaTeX only when the rcParams ask for it.
        if rcParams.get("text.usetex", False):
            unitstr = tex.unit_to_latex(unit)
        else:
            unitstr = unit.to_string()
        set_ = getattr(self, 'set_%slabel' % axis)
        if label:
            set_("%s [%s]" % (label, unitstr))
        else:
            set_(unitstr)

    def legend(self, *args, **kwargs):
        # set kwargs: slightly transparent frame, thick sample lines.
        alpha = kwargs.pop("alpha", 0.8)
        linewidth = kwargs.pop("linewidth", 8)
        # make legend
        legend = super(Axes, self).legend(*args, **kwargs)
        # find relevant axes
        if legend is not None:
            lframe = legend.get_frame()
            lframe.set_alpha(alpha)
            [l.set_linewidth(linewidth) for l in legend.get_lines()]
        return legend
    legend.__doc__ = _Axes.legend.__doc__

    def html_map(self, imagefile, data=None, **kwargs):
        """Create an HTML map for some data contained in these `Axes`

        Parameters
        ----------
        data : `~matplotlib.artist.Artist`, `~gwpy.types.Series`, `array-like`
            data to map, one of an `Artist` already drawn on these axes (
            via :meth:`plot` or :meth:`scatter`, for example) or a data set
        imagefile : `str`
            path to image file on disk for the containing `Figure`
        mapname : `str`, optional
            ID to connect <img> tag and <map> tags, default: ``'points'``. This
            should be unique if multiple maps are to be written to a single
            HTML file.
        shape : `str`, optional
            shape for <area> tag, default: ``'circle'``
        standalone : `bool`, optional
            wrap map HTML with required HTML5 header and footer tags,
            default: `True`
        title : `str`, optional
            title name for standalone HTML page
        jquery : `str`, optional
            URL of jquery script, defaults to googleapis.com URL

        Returns
        -------
        HTML : `str`
            string of HTML markup that defines the <img> and <map>
        """
        if data is None:
            # No data given: map the single artist drawn on these axes.
            artists = self.lines + self.collections + self.images
            if len(artists) != 1:
                raise ValueError("Cannot determine artist to map, %d found."
                                 % len(artists))
            data = artists[0]
        if isinstance(data, Artist):
            return html.map_artist(data, imagefile, **kwargs)
        else:
            return html.map_data(data, self, imagefile, **kwargs)


register_projection(Axes)
| andrew-lundgren/gwpy | gwpy/plotter/axes.py | Python | gpl-3.0 | 7,620 |
'''
Library for doing fun things with computers.
'''
__author__ = 'Andrew M Bates'
__version__ = '0.001'
import io, os, sys
# the core imports go here
# this should go in in the mods dir
try:
'''IF RASPBERRY PI & HAS A GPIO BOARD'''
import RPi.GPIO as RPi
except ImportError:
pass
| andrewbates09/FERGUS | fergus/__init__.py | Python | gpl-3.0 | 291 |
#!/usr/bin/env python3
"""Retrieve results from the DuckDuckGo zero-click API in simple HTML format."""
import json as jsonlib
import logging
import re
import urllib.request, urllib.error, urllib.parse
__version__ = (1, 0, 0)
def results2html(results, results_priority=None, max_number_of_results=None,
                 ignore_incomplete=True, always_show_related=False,
                 header_start_level=1, hide_headers=False, hide_signature=False):
    """Render a Results tree as a single HTML string.

    Sections are emitted in *results_priority* order; 'related' topics are
    suppressed when any other section has content, unless
    *always_show_related* is set.  Returns '' when there is nothing to show.
    """
    if not results:
        return ''
    if not results_priority:
        results_priority = ['answer', 'abstract', 'definition', 'results',
                            'infobox', 'redirect', 'related']
    if not always_show_related:
        # Drop 'related' whenever a higher-value section is complete.
        other = [x for x in results_priority if x != 'related']
        if any(results.get(x).is_complete() for x in other):
            results_priority = other
    html_header = '<h{level:d}>{title}</h{level:d}>'
    html_paragraph = '<p>{contents}</p>'
    html_contents = []
    children = [results.get(x) for x in results_priority]
    results_count = 0
    # Depth-first walk; nesting depth maps to the HTML header level.
    for level, child in _iterchildren(header_start_level, children):
        html = child.as_html()
        valid = html and (not ignore_incomplete or child.is_complete())
        # Emit a header for any named node that has content itself or has
        # children that might contribute some.
        if not hide_headers and child.name and (valid or child.children()):
            header = html_header.format(title=child.name, level=level)
            html_contents.append(header)
        if valid:
            html_contents.append(html_paragraph.format(contents=html))
            results_count += 1
        if max_number_of_results and results_count >= max_number_of_results:
            break
    # Prune any falsy fragments before joining.
    html_contents[:] = [x for x in html_contents if x]
    if not html_contents:
        return ''
    if not hide_signature:
        html_contents.append('<footer><small>Results from DuckDuckGo</small></footer>')
    return ''.join(html_contents).strip()
def search(query, useragent='duckduckgo2html', **kwargs):
    """Query the DuckDuckGo zero-click API and return a Results object,
    or None on an HTTP/URL error.  Extra kwargs override the API params."""
    params = {
        'q': query,
        'format': 'json',
        'pretty': '1',
        'no_redirect': '1',
        'no_html': '1',
        'skip_disambig': '0',
    }
    params.update(kwargs)
    enc_params = urllib.parse.urlencode(params)
    url = 'http://api.duckduckgo.com/?' + enc_params

    try:
        request = urllib.request.Request(url, headers={'User-Agent': useragent})
        response = urllib.request.urlopen(request)
        json = jsonlib.loads(response.read().decode('utf-8'))
        response.close()
        return Results(json)
    except urllib.error.HTTPError as err:
        logging.error('Query failed with HTTPError code %s', err.code)
    except urllib.error.URLError as err:
        logging.error('Query failed with URLError %s', err.reason)
    except Exception:
        # Anything unexpected is logged, then re-raised for the caller.
        logging.error('Unhandled exception')
        raise
    # HTTP/URL errors fall through to an implicit None return.
    return None
def _iterchildren(start_level, children):
for item in children:
grandchildren = item.children()
yield start_level, item
if grandchildren:
for subitem in _iterchildren(start_level+1, grandchildren):
yield subitem
def _html_url(url, display=None):
if not display:
display = url
return '<a href="{0}">{1}</a>'.format(url, display)
class Results(object):
    """Parsed top-level response from the DuckDuckGo Instant Answer API."""

    def __init__(self, json):
        # Keep a pretty-printed copy of the raw response around for debugging.
        self.json = jsonlib.dumps(json, indent=2)
        self.type = json.get('Type')
        self.answer = Answer(json)
        self.abstract = Abstract(json)
        self.definition = Definition(json)
        self.redirect = Redirect(json)
        self.infobox = Infobox(json)
        self.results = _ResultList('Results', json.get('Results', []))
        self.related = _ResultList('Related Topics', json.get('RelatedTopics', []))

    def get(self, name):
        """Return the section attribute called *name*.

        Falls back to an empty ``_ResultItemBase`` placeholder when the
        attribute is missing or falsy, so callers never get ``None``.
        """
        section = getattr(self, name, None)
        if section:
            return section
        return _ResultItemBase()
class _ResultItemBase(object):
"""Base class for results"""
def __init__(self, name=None):
self.name = name
def is_complete(self):
return False
def children(self):
return []
def as_html(self):
return ''
class _ResultList(_ResultItemBase):
    """A named collection of :class:`Result` items.

    Used for the 'Results' and 'Related Topics' sections of a response.
    """

    def __init__(self, name, items):
        super().__init__(name)
        self.items = [Result(entry) for entry in items]

    def children(self):
        return self.items
class Result(_ResultItemBase):
    """A single search or related-topic result, possibly with nested topics."""

    # Matches 'a>' not followed by a space; used to pad closing anchor tags
    # so link text does not run into the following words.
    _rex_sub = re.compile(r'a>(?! )')

    def __init__(self, json):
        super().__init__(json.get('Name', '') if json else '')
        # BUG FIX: the 'Topics' lookup must be guarded like every other
        # field here -- the original called json.get(...) unconditionally,
        # so Result(None) raised AttributeError instead of building an
        # empty result.
        self.topics = [Result(elem) for elem in json.get('Topics', [])] if json else []
        self.html = json.get('Result', '') if json else ''
        self.text = json.get('Text', '') if json else ''
        self.url = json.get('FirstURL', '') if json else ''

    def is_complete(self):
        return True if self.text else False

    def children(self):
        return self.topics

    def as_html(self):
        if self.html:
            return Result._rex_sub.sub('a> ', self.html)
        elif self.text:
            return self.text
        # Explicit empty string for consistency with the _ResultItemBase
        # contract (previously fell through and returned None).
        return ''
class Abstract(_ResultItemBase):
    """The topic-summary section of a DuckDuckGo response."""

    def __init__(self, json):
        super().__init__('Abstract')
        self.html = json['Abstract']
        self.text = json['AbstractText']
        self.url = json['AbstractURL']
        self.source = json['AbstractSource']
        self.heading = json['Heading']

    def is_complete(self):
        return bool(self.html or self.text)

    def as_html(self):
        """Join heading, body (HTML preferred over text) and source link."""
        pieces = []
        if self.heading:
            pieces.append('<b>{0}</b>'.format(self.heading))
        body = self.html or self.text
        if body:
            pieces.append(body)
        if self.url:
            pieces.append(_html_url(self.url, self.source))
        return ' - '.join(pieces)
class Answer(_ResultItemBase):
    """The direct-answer section (calculations, conversions, etc.)."""

    def __init__(self, json):
        super().__init__('Answer')
        self.text = json['Answer']
        self.type = json['AnswerType']
        self.url = None

    def is_complete(self):
        return bool(self.text)

    def as_html(self):
        # Drop carriage returns and turn newlines into explicit breaks.
        return self.text.replace('\r', '').replace('\n', '<br>')
class Definition(_ResultItemBase):
    """The dictionary-definition section of a response."""

    def __init__(self, json):
        super().__init__('Definition')
        self.text = json['Definition']
        self.url = json['DefinitionURL']
        self.source = json['DefinitionSource']

    def is_complete(self):
        return bool(self.text)

    def as_html(self):
        # NOTE: implicitly returns None (falsy, like the base class' '')
        # when neither text nor URL is available.
        if self.text:
            if self.url:
                return self.text + ' - ' + _html_url(self.url, self.source)
            return self.text
        if self.url:
            return _html_url(self.url, self.source)
class Redirect(_ResultItemBase):
    """A bang-style redirect pointing at an external URL."""

    def __init__(self, json):
        super().__init__('Redirect')
        self.url = json['Redirect']

    def is_complete(self):
        return bool(self.url)

    def as_html(self):
        # Returns None (falsy) when there is no redirect URL.
        if not self.url:
            return None
        return _html_url(self.url)
class Infobox(_ResultItemBase):
    """Structured key/value facts about the topic."""

    class Content(object):
        """One labelled entry inside the infobox."""

        def __init__(self, json):
            self.data_type = json.get('data_type', '') if json else ''
            self.label = json.get('label', '') if json else ''
            self.value = json.get('value', '') if json else ''

        def as_html(self):
            # Only plain string entries with both a label and a value render;
            # everything else yields None and is filtered out by the caller.
            if self.data_type != 'string' or not self.label or not self.value:
                return None
            return '<b>{0}</b> {1}'.format(self.label, self.value)

    def __init__(self, json):
        super().__init__('Infobox')
        infobox = json.get('Infobox') or {}
        self.meta = infobox.get('meta', [])
        self.content = [Infobox.Content(entry) for entry in infobox.get('content', [])]

    def is_complete(self):
        return bool(self.content)

    def as_html(self):
        rendered = [entry.as_html() for entry in self.content]
        return '<br>'.join(part for part in rendered if part)
if __name__ == '__main__':
    # Command-line entry point: search DuckDuckGo and print HTML results.
    import argparse
    import sys
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'query',
        nargs='*',
        help='the search query')
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s v{0}.{1}.{2}'.format(*__version__))
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s: %(filename)s: %(message)s')
    if args.query:
        # All positional arguments form a single query.
        queries = [' '.join(args.query)]
    elif not sys.stdin.isatty():
        # No arguments but piped input: one query per line of stdin.
        queries = sys.stdin.read().splitlines()
    else:
        parser.print_help()
        sys.exit(1)
    for query in queries:
        html = results2html(search(query))
        if html:
            print(html)
        else:
            logging.warning('No results found')
| nsubiron/SublimeSuricate | lib/thirdparty/duckduckgo2html.py | Python | gpl-3.0 | 8,822 |
"""
It is used to test the client -> DB -> service chain.
It requires the Monitoring service to be running and installed (so discoverable in the .cfg),
and this Monitoring service should be connecting to an ElasticSearch instance
"""
# pylint: disable=invalid-name,wrong-import-position
import unittest
import tempfile
import time
from datetime import datetime
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC import gLogger
from DIRAC.MonitoringSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.Core.DISET.TransferClient import TransferClient
class MonitoringTestCase(unittest.TestCase):
    """Shared fixture for the Monitoring integration tests.

    Provides a MonitoringClient plus a canned batch of WMSHistory documents
    (snapshots of job counts per site/user/group; 'timestamp' is in epoch
    seconds) that the insert/query/delete test cases below all reuse.
    """

    def setUp(self):
        gLogger.setLevel('DEBUG')
        # Client talking to the running Monitoring service (see module docstring).
        self.client = MonitoringClient()
        # Static WMSHistory records used as test input for insertion and queries.
        self.data = [{u'Status': u'Waiting', 'Jobs': 2, u'timestamp': 1458130176, u'JobSplitType': u'MCStripping',
                      u'MinorStatus': u'unset', u'Site': u'LCG.GRIDKA.de', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00049848', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458130176, u'JobSplitType': u'User',
                      u'MinorStatus': u'unset', u'Site': u'LCG.PIC.es', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'olupton', u'JobGroup': u'lhcb', u'UserGroup': u'lhcb_user', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458130176, u'JobSplitType': u'User',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RAL.uk', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'olupton', u'JobGroup': u'lhcb', u'UserGroup': u'lhcb_user', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458130176, u'JobSplitType': u'MCStripping',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RAL.uk', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00049845', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 34, u'timestamp': 1458141578, u'JobSplitType': u'DataStripping',
                      u'MinorStatus': u'unset', u'Site': u'Group.RAL.uk', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050299', u'UserGroup': u'lhcb_data', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 120, u'timestamp': 1458141578, u'JobSplitType': u'User',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CERN.ch', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'mvesteri', u'JobGroup': u'lhcb', u'UserGroup': u'lhcb_user', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458141578, u'JobSplitType': u'MCStripping',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CNAF.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00049845', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 2, u'timestamp': 1458141578, u'JobSplitType': u'MCStripping',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CNAF.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00049848', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458141578, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CNAF.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050286', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 95, u'timestamp': 1458199202, u'JobSplitType': u'User',
                      u'MinorStatus': u'unset', u'Site': u'Multiple', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'mamartin', u'JobGroup': u'lhcb', u'UserGroup': u'lhcb_user', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 3, u'timestamp': 1458199202, u'JobSplitType': u'User',
                      u'MinorStatus': u'unset', u'Site': u'Multiple', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'olupton', u'JobGroup': u'lhcb', u'UserGroup': u'lhcb_user', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 129, u'timestamp': 1458199202, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'Multiple', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00049844', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 5, u'timestamp': 1458217812, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.IHEP.su', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050232', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 7, u'timestamp': 1458217812, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.IHEP.su', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050234', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 1, u'timestamp': 1458217812, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.IHEP.su', u'Reschedules': 1, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050236', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 3, u'timestamp': 1458217812, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.IHEP.su', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050238', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 2, u'timestamp': 1458217812, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.IHEP.su', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050248', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 12, u'timestamp': 1458218413, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CNAF.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050248', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 5, u'timestamp': 1458218413, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CNAF.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050250', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 4, u'timestamp': 1458218413, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CNAF.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050251', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 1, u'timestamp': 1458218413, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.CNAF.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050280', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 24, u'timestamp': 1458219012, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.NIKHEF.nl', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050248', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 3, u'timestamp': 1458219012, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.NIKHEF.nl', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050251', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 1, u'timestamp': 1458222013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.Bologna.it', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset', u'User': u'phicharp', u'JobGroup': u'00050303',
                      u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 7, u'timestamp': 1458222013, u'JobSplitType': u'User',
                      u'MinorStatus': u'unset', u'Site': u'LCG.Bristol.uk', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset', u'User': u'clangenb', u'JobGroup': u'lhcb',
                      u'UserGroup': u'lhcb_user', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 2, u'timestamp': 1458222013, u'JobSplitType': u'User',
                      u'MinorStatus': u'unset', u'Site': u'LCG.Bristol.uk', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset', u'User': u'mrwillia', u'JobGroup': u'lhcb',
                      u'UserGroup': u'lhcb_user',
                      u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 1, u'timestamp': 1458222013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.Bari.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050244', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 11, u'timestamp': 1458222013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.Bari.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050246', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 22, u'timestamp': 1458222013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.Bari.it', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050248', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 23, u'timestamp': 1458225013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.DESYZN.de', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00049844', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 18, u'timestamp': 1458225013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.DESYZN.de', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00049847', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 1, u'timestamp': 1458225013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.DESYZN.de', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050238', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Running', 'Jobs': 1, u'timestamp': 1458225013, u'JobSplitType': u'MCSimulation',
                      u'MinorStatus': u'unset', u'Site': u'LCG.DESYZN.de', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050246', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458226213, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RRCKI.ru', u'Reschedules': 0,
                      u'ApplicationStatus': u'unset', u'User': u'phicharp', u'JobGroup': u'00050243',
                      u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458226213, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RRCKI.ru', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050251', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458226213, u'JobSplitType': u'MCStripping',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RRCKI.ru', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050256', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458226213, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RAL.uk', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050229', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458226213, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RAL.uk', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050241', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 1, u'timestamp': 1458226213, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RAL.uk', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050243', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'},
                     {u'Status': u'Waiting', 'Jobs': 2, u'timestamp': 1458226213, u'JobSplitType': u'MCReconstruction',
                      u'MinorStatus': u'unset', u'Site': u'LCG.RAL.uk', u'Reschedules': 0, u'ApplicationStatus': u'unset',
                      u'User': u'phicharp', u'JobGroup': u'00050247', u'UserGroup': u'lhcb_mc', u'metric': u'WMSHistory'}]

    def tearDown(self):
        # No per-test cleanup; index removal is done by MonitoringDeleteChain.
        pass
class MonitoringInsertData(MonitoringTestCase):
    """Push the fixture documents into the Monitoring service."""

    def test_addMonitoringRecords(self):
        # NOTE(review): this call is expected to fail -- the assertion checks
        # for an error 'Message' key; confirm the intended failure mode.
        response = self.client.addMonitoringRecords('moni', 'WMSHistory', self.data)
        self.assertTrue(response['Message'])

    def test_bulkinsert(self):
        response = self.client.addRecords("wmshistory_index", "WMSHistory", self.data)
        self.assertTrue(response['OK'])
        self.assertEqual(response['Value'], len(self.data))
        # Give the backend time to index the freshly inserted documents
        # before the query test cases run.
        time.sleep(10)
class MonitoringTestChain(MonitoringTestCase):
    """Query the documents inserted by MonitoringInsertData."""

    def test_listReports(self):
        """The WMSHistory type exposes the expected report names."""
        result = self.client.listReports('WMSHistory')
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], ['AverageNumberOfJobs', 'NumberOfJobs', 'NumberOfReschedules'])

    def test_listUniqueKeyValues(self):
        """All grouping keys are present (values empty for this test setup)."""
        result = self.client.listUniqueKeyValues('WMSHistory')
        self.assertTrue(result['OK'])
        self.assertTrue('Status' in result['Value'])
        self.assertTrue('JobSplitType' in result['Value'])
        self.assertTrue('MinorStatus' in result['Value'])
        self.assertTrue('Site' in result['Value'])
        self.assertTrue('ApplicationStatus' in result['Value'])
        self.assertTrue('User' in result['Value'])
        self.assertTrue('JobGroup' in result['Value'])
        self.assertTrue('UserGroup' in result['Value'])
        self.assertDictEqual(result['Value'], {u'Status': [],
                                               u'JobSplitType': [],
                                               u'MinorStatus': [],
                                               u'Site': [],
                                               u'ApplicationStatus': [],
                                               u'User': [],
                                               u'JobGroup': [],
                                               u'UserGroup': []})

    def test_generatePlot(self):
        """A delayed plot can be generated over the fixture time window."""
        params = (
            'WMSHistory', 'NumberOfJobs', datetime(
                2016, 3, 16, 12, 30, 0, 0), datetime(
                2016, 3, 17, 19, 29, 0, 0), {
                'grouping': ['Site']}, 'Site', {})
        result = self.client.generateDelayedPlot(*params)
        self.assertTrue(result['OK'])
        # self.assertEqual(
        #     result['Value'],
        #     {
        #         plot = 'Z:eNpljcEKwjAQRH8piWLbvQkeRLAeKnhOm7Us2CTsbsH69UYUFIQZZvawb4LUMKQYdjRoKH3kNGeK403W0JEiolSAMZ\
        #             xpwodXcsZukFZItipukFyxeSmiNIQ3Zb_lUQL-wD4ssQYYc2Jt_VQuB-089cin6yH1Ur5FPev_\
        #             UgnrSjXfpRp0yfjGGLgcuz2JJl7wCYg6Slo='
        #         'plot': plot,
        #         'thumbnail': False})

    def test_getPlot(self):
        """The generated plot file can be fetched via the TransferClient."""
        tempFile = tempfile.TemporaryFile()
        transferClient = TransferClient('Monitoring/Monitoring')
        params = (
            'WMSHistory', 'NumberOfJobs', datetime(
                2016, 3, 16, 12, 30, 0, 0), datetime(
                2016, 3, 17, 19, 29, 0, 0), {
                'grouping': ['Site']}, 'Site', {})
        result = self.client.generateDelayedPlot(*params)
        self.assertTrue(result['OK'])
        result = transferClient.receiveFile(tempFile, result['Value']['plot'])
        self.assertTrue(result['OK'])

    def test_getReport(self):
        """The raw report data matches the inserted fixture, bucketed at 1800s."""
        params = (
            'WMSHistory', 'NumberOfJobs', datetime(
                2016, 3, 16, 12, 30, 0, 0), datetime(
                2016, 3, 17, 19, 29, 0, 0), {
                'grouping': ['Site']}, 'Site', {})
        result = self.client.getReport(*params)
        self.assertTrue(result['OK'])
        self.assertDictEqual(result['Value'],
                             {'data': {u'Multiple': {1458198000: 227.0},
                                       u'LCG.RRCKI.ru': {1458225000: 3.0},
                                       u'LCG.IHEP.su': {1458217800: 18.0},
                                       u'LCG.CNAF.it': {1458144000: None,
                                                        1458172800: None,
                                                        1458194400: None,
                                                        1458145800: None,
                                                        1458189000: None,
                                                        1458147600: None,
                                                        1458178200: None,
                                                        1458183600: None,
                                                        1458212400: None,
                                                        1458149400: None,
                                                        1458207000: None,
                                                        1458151200: None,
                                                        1458169200: None,
                                                        1458201600: None,
                                                        1458153000: None,
                                                        1458196200: None,
                                                        1458154800: None,
                                                        1458174600: None,
                                                        1458190800: None,
                                                        1458156600: None,
                                                        1458185400: None,
                                                        1458214200: None,
                                                        1458158400: None,
                                                        1458180000: None,
                                                        1458216000: None,
                                                        1458208800: None,
                                                        1458160200: None,
                                                        1458203400: None,
                                                        1458162000: None,
                                                        1458142200: None,
                                                        1458198000: None,
                                                        1458163800: None,
                                                        1458192600: None,
                                                        1458165600: None,
                                                        1458176400: None,
                                                        1458187200: None,
                                                        1458167400: None,
                                                        1458210600: None,
                                                        1458140400: 4.0,
                                                        1458181800: None,
                                                        1458205200: None,
                                                        1458171000: None,
                                                        1458217800: 22.0,
                                                        1458199800: None},
                                       u'LCG.NIKHEF.nl': {1458217800: 27.0},
                                       u'LCG.Bari.it': {1458221400: 34.0},
                                       u'Group.RAL.uk': {1458140400: 34.0},
                                       u'LCG.DESYZN.de': {1458225000: 43.0},
                                       u'LCG.RAL.uk': {1458144000: None,
                                                       1458158400: None,
                                                       1458194400: None,
                                                       1458145800: None,
                                                       1458223200: None,
                                                       1458189000: None,
                                                       1458221400: None,
                                                       1458225000: 5.0,
                                                       1458147600: None,
                                                       1458135000: None,
                                                       1458183600: None,
                                                       1458212400: None,
                                                       1458149400: None,
                                                       1458178200: None,
                                                       1458207000: None,
                                                       1458151200: None,
                                                       1458169200: None,
                                                       1458172800: None,
                                                       1458219600: None,
                                                       1458201600: None,
                                                       1458153000: None,
                                                       1458196200: None,
                                                       1458154800: None,
                                                       1458160200: None,
                                                       1458190800: None,
                                                       1458156600: None,
                                                       1458185400: None,
                                                       1458214200: None,
                                                       1458129600: 2.0,
                                                       1458165600: None,
                                                       1458180000: None,
                                                       1458216000: None,
                                                       1458208800: None,
                                                       1458131400: None,
                                                       1458174600: None,
                                                       1458203400: None,
                                                       1458162000: None,
                                                       1458171000: None,
                                                       1458198000: None,
                                                       1458163800: None,
                                                       1458192600: None,
                                                       1458136800: None,
                                                       1458133200: None,
                                                       1458187200: None,
                                                       1458167400: None,
                                                       1458181800: None,
                                                       1458210600: None,
                                                       1458140400: None,
                                                       1458138600: None,
                                                       1458176400: None,
                                                       1458205200: None,
                                                       1458142200: None,
                                                       1458217800: None,
                                                       1458199800: None},
                                       u'LCG.PIC.es': {1458129600: 1.0},
                                       u'LCG.GRIDKA.de': {1458129600: 2.0},
                                       u'LCG.Bristol.uk': {1458221400: 9.0},
                                       u'LCG.CERN.ch': {1458140400: 120.0},
                                       u'LCG.Bologna.it': {1458221400: 1.0}},
                              'granularity': 1800})

    def test_getLastDayData(self):
        """Filtered raw documents for the last day carry all expected keys."""
        params = {'Status': 'Running', 'Site': 'LCG.NIKHEF.nl'}
        result = self.client.getLastDayData('WMSHistory', params)
        self.assertTrue(result['OK'])
        self.assertEqual(len(result['Value']), 2)
        self.assertEqual(sorted(result['Value'][0].keys()), sorted([u'Status',
                                                                    u'Jobs',
                                                                    u'JobSplitType',
                                                                    u'timestamp',
                                                                    u'MinorStatus',
                                                                    u'Site',
                                                                    u'Reschedules',
                                                                    u'ApplicationStatus',
                                                                    u'User',
                                                                    u'JobGroup',
                                                                    u'UserGroup']))
class MonitoringDeleteChain(MonitoringTestCase):
    """Remove the index created by the insertion tests."""

    def test_deleteNonExistingIndex(self):
        # Deleting an unknown index must come back as an error result.
        response = self.client.deleteIndex("alllaaaaa")
        self.assertTrue(response['Message'])

    def test_deleteIndex(self):
        # The index name is date-suffixed, so rebuild today's name.
        date_stamp = datetime.today().strftime("%Y-%m-%d")
        index_name = "%s-%s" % ('wmshistory_index', date_stamp)
        response = self.client.deleteIndex(index_name)
        self.assertTrue(response['OK'])
        self.assertTrue('_index-%s' % date_stamp in response['Value'])
if __name__ == '__main__':
    # Run the suites in dependency order: fixture sanity, then insertion,
    # then queries against the inserted data, and finally index cleanup.
    testSuite = unittest.defaultTestLoader.loadTestsFromTestCase(MonitoringTestCase)
    testSuite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(MonitoringInsertData))
    testSuite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(MonitoringTestChain))
    testSuite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(MonitoringDeleteChain))
    unittest.TextTestRunner(verbosity=2).run(testSuite)
| arrabito/DIRAC | tests/Integration/Monitoring/Test_MonitoringSystem.py | Python | gpl-3.0 | 27,975 |
# coding=utf-8
"""**Tests for safe raster layer class**
contains tests for QGIS specific methods.
See test_io.py also
"""
__author__ = 'Dmitry Kolesov <kolesov.dm@gmail.com>'
__revision__ = '$Format:%H$'
__date__ = '28/12/2013'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import os
import logging
import unittest
from qgis.core import QgsRasterLayer
from safe.storage.utilities import read_keywords
from safe.storage.raster import Raster
from safe.test.utilities import test_data_path, get_qgis_app
# Initialise a (possibly headless) QGIS application for the test session.
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
LOGGER = logging.getLogger('InaSAFE')
# Flood-hazard fixture: keyword metadata file plus the raster base path
# (the '.tif' / '.keywords' extensions are appended where needed).
KEYWORD_PATH = test_data_path('hazard', 'jakarta_flood_design.xml')
RASTER_BASE = test_data_path('hazard', 'jakarta_flood_design')
class RasterTest(unittest.TestCase):
    """Round-trip checks between QgsRasterLayer and safe.storage Raster."""

    @staticmethod
    def _extent_as_list(qgis_layer):
        """Return the provider extent of *qgis_layer* as [xmin, ymin, xmax, ymax]."""
        rect = qgis_layer.dataProvider().extent()
        return [rect.xMinimum(), rect.yMinimum(),
                rect.xMaximum(), rect.yMaximum()]

    def setUp(self):
        msg = 'Keyword file does not exist at %s' % KEYWORD_PATH
        assert os.path.exists(KEYWORD_PATH), msg

    def test_qgis_raster_layer_loading(self):
        """Test that reading from QgsRasterLayer works."""
        # This line is the cause of the problem:
        qgis_layer = QgsRasterLayer(RASTER_BASE + '.tif', 'test')
        layer = Raster(data=qgis_layer)
        qgis_extent = self._extent_as_list(qgis_layer)
        layer_extent = layer.get_bounding_box()
        self.assertListEqual(
            layer_extent, qgis_extent,
            'Expected %s extent, got %s' % (qgis_extent, layer_extent))

    def test_convert_to_qgis_raster_layer(self):
        """Test that converting to QgsRasterLayer works."""
        # Build a safe Raster from file, then convert back to a QGIS layer.
        keywords = read_keywords(RASTER_BASE + '.keywords')
        layer = Raster(data=RASTER_BASE + '.tif', keywords=keywords)
        qgis_layer = layer.as_qgis_native()
        qgis_extent = self._extent_as_list(qgis_layer)
        layer_extent = layer.get_bounding_box()
        self.assertListEqual(
            layer_extent, qgis_extent,
            'Expected %s extent, got %s' % (qgis_extent, layer_extent))
if __name__ == '__main__':
    # Allow running this module directly, outside a test runner.
    suite = unittest.makeSuite(RasterTest, 'test')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| dynaryu/inasafe | safe/storage/test/test_raster.py | Python | gpl-3.0 | 2,520 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, threading
from future_builtins import map
from calibre import browser, random_user_agent
from calibre.customize import Plugin
from calibre.utils.icu import capitalize, lower, upper
from calibre.ebooks.metadata import check_isbn
from calibre.utils.localization import canonicalize_lang, get_lang
def create_log(ostream=None):
    """Return a DEBUG-level thread-safe calibre log writing to *ostream*."""
    from calibre.utils.logging import FileStream, ThreadSafeLog
    logger = ThreadSafeLog(level=ThreadSafeLog.DEBUG)
    logger.outputs = [FileStream(ostream)]
    return logger
# Comparing Metadata objects for relevance {{{
# Leading articles/conjunctions that are stripped from titles before
# fuzzy comparison.
words = ("the", "a", "an", "of", "and")
prefix_pat = re.compile(r'^(%s)\s+'%("|".join(words)))
# A trailing parenthesised suffix, e.g. "Title (Special Edition)".
trailing_paren_pat = re.compile(r'\(.*\)$')
# Any run of whitespace, collapsed to a single space during cleanup.
whitespace_pat = re.compile(r'\s+')
def cleanup_title(s):
    """Normalise a title for fuzzy comparison.

    Lower-cases, strips leading articles, drops a trailing parenthesised
    suffix and collapses runs of whitespace. Falls back to the localised
    'Unknown' string for empty input.
    """
    if not s:
        s = _('Unknown')
    title = s.strip().lower()
    title = prefix_pat.sub(' ', title)
    title = trailing_paren_pat.sub('', title)
    title = whitespace_pat.sub(' ', title)
    return title.strip()
class InternalMetadataCompareKeyGen(object):

    '''
    Generate a sort key for comparison of the relevance of Metadata objects,
    given a search query. This is used only to compare results from the same
    metadata source, not across different sources.

    The sort key ensures that an ascending order sort is a sort by order of
    decreasing relevance.

    The algorithm is:

        * Prefer results that have at least one identifier the same as for the query
        * Prefer results with a cached cover URL
        * Prefer results with all available fields filled in
        * Prefer results with the same language as the current user interface language
        * Prefer results that are an exact title match to the query
        * Prefer results with longer comments (greater than 10% longer)
        * Use the relevance of the result as reported by the metadata source's search
          engine
    '''

    def __init__(self, mi, source_plugin, title, authors, identifiers):
        # Every component below uses 1 for "better" and 2 for "worse", so
        # tuple comparison in ascending order yields decreasing relevance.
        same_identifier = 2
        idents = mi.get_identifiers()
        for k, v in identifiers.iteritems():
            if idents.get(k) == v:
                same_identifier = 1
                break

        all_fields = 1 if source_plugin.test_fields(mi) is None else 2

        exact_title = 1 if title and \
                cleanup_title(title) == cleanup_title(mi.title) else 2

        language = 1
        if mi.language:
            mil = canonicalize_lang(mi.language)
            # 'und' (undetermined) is not penalised; only a definite
            # mismatch with the UI language is.
            if mil != 'und' and mil != canonicalize_lang(get_lang()):
                language = 2

        has_cover = 2 if (not source_plugin.cached_cover_url_is_reliable or
                source_plugin.get_cached_cover_url(mi.identifiers) is None) else 1

        # Primary key: compared lexicographically, most important first.
        self.base = (same_identifier, has_cover, all_fields, language, exact_title)
        self.comments_len = len(mi.comments.strip() if mi.comments else '')
        # Final tie-breaker: the source's own reported relevance.
        self.extra = (getattr(mi, 'source_relevance', 0), )

    def __cmp__(self, other):
        # Python 2 rich-ordering protocol: negative/zero/positive result.
        result = cmp(self.base, other.base)
        if result == 0:
            # Now prefer results with the longer comments, within 10%
            cx, cy = self.comments_len, other.comments_len
            t = (cx + cy) / 20
            delta = cy - cx
            if abs(delta) > t:
                result = delta
            else:
                result = cmp(self.extra, other.extra)
        return result
# }}}
def get_cached_cover_urls(mi):
    """Yield (plugin, url) pairs for every identify plugin that already
    has a cover URL cached for the identifiers of *mi*."""
    from calibre.customize.ui import metadata_plugins
    candidates = list(metadata_plugins(['identify']))
    for plugin in candidates:
        cover_url = plugin.get_cached_cover_url(mi.identifiers)
        if cover_url:
            yield (plugin, cover_url)
def dump_caches():
    """Return a snapshot of every identify plugin's caches, keyed by name."""
    from calibre.customize.ui import metadata_plugins
    caches = {}
    for plugin in metadata_plugins(['identify']):
        caches[plugin.name] = plugin.dump_caches()
    return caches
def load_caches(dump):
    """Restore plugin caches from a snapshot produced by dump_caches().

    Plugins absent from the snapshot (or with empty entries) are skipped.
    """
    from calibre.customize.ui import metadata_plugins
    for plugin in list(metadata_plugins(['identify'])):
        saved = dump.get(plugin.name, None)
        if saved:
            plugin.load_caches(saved)
def cap_author_token(token):
    """Capitalize a single author-name token.

    Handles lower-case particles (von, de, ...), runs of initials
    (J.K. -> J. K.), Scottish Mc/Mac prefixes (mcdonald -> McDonald) and
    hyphenated or apostrophised names (anne-marie, o'brien). Uses the
    ICU-aware lower/upper/capitalize helpers for non-ASCII safety.
    """
    lt = lower(token)
    if lt in ('von', 'de', 'el', 'van', 'le'):
        # Name particles stay lower-case by convention.
        return lt
    # Two or more single-letter initials (no digits, no special characters).
    if re.match(r'([^\d\W]\.){2,}$', lt, re.UNICODE) is not None:
        # Normalize tokens of the form J.K. to J. K.
        parts = token.split('.')
        return '. '.join(map(capitalize, parts)).strip()
    # Detect a Mc/Mac prefix: only when the next letter is already
    # upper-case, or the whole token is lower-case (lt == token).
    scots_name = None
    for x in ('mc', 'mac'):
        if (token.lower().startswith(x) and len(token) > len(x) and
                (
                    token[len(x)] == upper(token[len(x)]) or
                    lt == token
                )):
            scots_name = len(x)
            break
    ans = capitalize(token)
    if scots_name is not None:
        # Re-capitalize the letter right after the Mc/Mac prefix.
        ans = ans[:scots_name] + upper(ans[scots_name]) + ans[scots_name+1:]
    for x in ('-', "'"):
        # Capitalize the letter following a hyphen or apostrophe.
        idx = ans.find(x)
        if idx > -1 and len(ans) > idx+2:
            ans = ans[:idx+1] + upper(ans[idx+1]) + ans[idx+2:]
    return ans
def fixauthors(authors):
    """Apply cap_author_token to every whitespace-separated token of
    every author name; empty input is returned unchanged."""
    if not authors:
        return authors
    return [' '.join(map(cap_author_token, author.split())) for author in authors]
def fixcase(x):
    """Title-case *x*; falsy values (None, '') pass through unchanged."""
    if not x:
        return x
    from calibre.utils.titlecase import titlecase
    return titlecase(x)
class Option(object):
    __slots__ = ['type', 'default', 'label', 'desc', 'name', 'choices']

    def __init__(self, name, type_, default, label, desc, choices=None):
        '''
        :param name: The name of this option. Must be a valid python identifier
        :param type_: The type of this option, one of ('number', 'string',
                      'bool', 'choices')
        :param default: The default value for this option
        :param label: A short (few words) description of this option
        :param desc: A longer description of this option
        :param choices: A dict of possible values, used only if type='choices'.
                        dict is of the form {key:human readable label, ...}
        '''
        self.name = name
        self.type = type_
        self.default = default
        self.label = label
        self.desc = desc
        # A non-dict iterable of choices is normalised into an identity
        # mapping (value -> value); a dict is stored as-is.
        if choices and not isinstance(choices, dict):
            choices = dict([(x, x) for x in choices])
        self.choices = choices
class Source(Plugin):
    '''
    Base class for all metadata source plugins. Subclasses implement
    :meth:`identify` and, if they supply covers, :meth:`download_cover`.
    '''

    type = _('Metadata source')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    #: Set of capabilities supported by this plugin.
    #: Useful capabilities are: 'identify', 'cover'
    capabilities = frozenset()

    #: List of metadata fields that can potentially be download by this plugin
    #: during the identify phase
    touched_fields = frozenset()

    #: Set this to True if your plugin returns HTML formatted comments
    has_html_comments = False

    #: Setting this to True means that the browser object will add
    #: Accept-Encoding: gzip to all requests. This can speedup downloads
    #: but make sure that the source actually supports gzip transfer encoding
    #: correctly first
    supports_gzip_transfer_encoding = False

    #: Set this to True to ignore HTTPS certificate errors when connecting
    #: to this source.
    ignore_ssl_errors = False

    #: Cached cover URLs can sometimes be unreliable (i.e. the download could
    #: fail or the returned image could be bogus. If that is often the case
    #: with this source set to False
    cached_cover_url_is_reliable = True

    #: A list of :class:`Option` objects. They will be used to automatically
    #: construct the configuration widget for this plugin
    options = ()

    #: A string that is displayed at the top of the config widget for this
    #: plugin
    config_help_message = None

    #: If True this source can return multiple covers for a given query
    can_get_multiple_covers = False

    #: If set to True covers downloaded by this plugin are automatically trimmed.
    auto_trim_covers = False

    #: If set to True, and this source returns multiple results for a query,
    #: some of which have ISBNs and some of which do not, the results without
    #: ISBNs will be ignored
    prefer_results_with_isbn = True

    def __init__(self, *args, **kwargs):
        Plugin.__init__(self, *args, **kwargs)
        self.running_a_test = False  # Set to True when using identify_test()
        # Caches shared between identify and cover download phases; guarded
        # by cache_lock since downloads run in worker threads.
        self._isbn_to_identifier_cache = {}
        self._identifier_to_cover_url_cache = {}
        self.cache_lock = threading.RLock()
        self._config_obj = None
        self._browser = None
        self.prefs.defaults['ignore_fields'] = []
        for opt in self.options:
            self.prefs.defaults[opt.name] = opt.default

    # Configuration {{{
    def is_configured(self):
        '''
        Return False if your plugin needs to be configured before it can be
        used. For example, it might need a username/password/API key.
        '''
        return True

    def is_customizable(self):
        return True

    def customization_help(self):
        return 'This plugin can only be customized using the GUI'

    def config_widget(self):
        from calibre.gui2.metadata.config import ConfigWidget
        return ConfigWidget(self)

    def save_settings(self, config_widget):
        config_widget.commit()

    @property
    def prefs(self):
        # Lazily created, stored per-plugin under metadata_sources/<name>.json
        if self._config_obj is None:
            from calibre.utils.config import JSONConfig
            self._config_obj = JSONConfig('metadata_sources/%s.json'%self.name)
        return self._config_obj
    # }}}

    # Browser {{{
    @property
    def user_agent(self):
        # Pass in an index to random_user_agent() to test with a particular
        # user agent
        return random_user_agent()

    @property
    def browser(self):
        # A fresh clone is returned on every access so that worker threads
        # never share one browser instance (cookies/state are copied).
        if self._browser is None:
            self._browser = browser(user_agent=self.user_agent, verify_ssl_certificates=not self.ignore_ssl_errors)
            if self.supports_gzip_transfer_encoding:
                self._browser.set_handle_gzip(True)
        return self._browser.clone_browser()
    # }}}

    # Caching {{{
    def get_related_isbns(self, id_):
        '''
        Yield all ISBNs that have been cached as mapping to the identifier
        id_ for this source.
        '''
        with self.cache_lock:
            for isbn, q in self._isbn_to_identifier_cache.iteritems():
                if q == id_:
                    yield isbn

    def cache_isbn_to_identifier(self, isbn, identifier):
        with self.cache_lock:
            self._isbn_to_identifier_cache[isbn] = identifier

    def cached_isbn_to_identifier(self, isbn):
        with self.cache_lock:
            return self._isbn_to_identifier_cache.get(isbn, None)

    def cache_identifier_to_cover_url(self, id_, url):
        with self.cache_lock:
            self._identifier_to_cover_url_cache[id_] = url

    def cached_identifier_to_cover_url(self, id_):
        with self.cache_lock:
            return self._identifier_to_cover_url_cache.get(id_, None)

    def dump_caches(self):
        '''Return a copy of the internal caches, suitable for load_caches().'''
        with self.cache_lock:
            return {'isbn_to_identifier':self._isbn_to_identifier_cache.copy(),
                    'identifier_to_cover':self._identifier_to_cover_url_cache.copy()}

    def load_caches(self, dump):
        '''Merge a dict previously produced by dump_caches() into this plugin.'''
        with self.cache_lock:
            self._isbn_to_identifier_cache.update(dump['isbn_to_identifier'])
            self._identifier_to_cover_url_cache.update(dump['identifier_to_cover'])
    # }}}

    # Utility functions {{{
    def get_author_tokens(self, authors, only_first_author=True):
        '''
        Take a list of authors and return a list of tokens useful for an
        AND search query. This function tries to return tokens in
        first name middle names last name order, by assuming that if a comma is
        in the author name, the name is in lastname, other names form.
        '''
        if authors:
            # Leave ' in there for Irish names
            remove_pat = re.compile(r'[!@#$%^&*(){}`~"\s\[\]/]')
            replace_pat = re.compile(r'[-+.:;,]')
            if only_first_author:
                authors = authors[:1]
            for au in authors:
                has_comma = ',' in au
                au = replace_pat.sub(' ', au)
                parts = au.split()
                if has_comma:
                    # au probably in ln, fn form
                    parts = parts[1:] + parts[:1]
                for tok in parts:
                    tok = remove_pat.sub('', tok).strip()
                    # Drop very short tokens and common particles/placeholders
                    if len(tok) > 2 and tok.lower() not in ('von', 'van',
                            _('Unknown').lower()):
                        yield tok

    def get_title_tokens(self, title, strip_joiners=True, strip_subtitle=False):
        '''
        Take a title and return a list of tokens useful for an AND search query.
        Excludes connectives(optionally) and punctuation.
        '''
        if title:
            # strip sub-titles
            if strip_subtitle:
                subtitle = re.compile(r'([\(\[\{].*?[\)\]\}]|[/:\\].*$)')
                if len(subtitle.sub('', title)) > 1:
                    title = subtitle.sub('', title)
            title_patterns = [(re.compile(pat, re.IGNORECASE), repl) for pat, repl in
            [
                # Remove things like: (2010) (Omnibus) etc.
                (r'(?i)[({\[](\d{4}|omnibus|anthology|hardcover|audiobook|audio\scd|paperback|turtleback|mass\s*market|edition|ed\.)[\])}]', ''),
                # Remove any strings that contain the substring edition inside
                # parentheses
                (r'(?i)[({\[].*?(edition|ed.).*?[\]})]', ''),
                # Remove commas used a separators in numbers
                (r'(\d+),(\d+)', r'\1\2'),
                # Remove hyphens only if they have whitespace before them
                (r'(\s-)', ' '),
                # Replace other special chars with a space
                (r'''[:,;!@$%^&*(){}.`~"\s\[\]/]''', ' '),
            ]]
            for pat, repl in title_patterns:
                title = pat.sub(repl, title)
            tokens = title.split()
            for token in tokens:
                token = token.strip().strip('"').strip("'")
                if token and (not strip_joiners or token.lower() not in ('a',
                    'and', 'the', '&')):
                    yield token

    def split_jobs(self, jobs, num):
        'Split a list of jobs into at most num groups, as evenly as possible'
        groups = [[] for i in range(num)]
        jobs = list(jobs)
        while jobs:
            for gr in groups:
                try:
                    job = jobs.pop()
                except IndexError:
                    break
                gr.append(job)
        return [g for g in groups if g]

    def test_fields(self, mi):
        '''
        Return the first field from self.touched_fields that is null on the
        mi object
        '''
        for key in self.touched_fields:
            if key.startswith('identifier:'):
                key = key.partition(':')[-1]
                if not mi.has_identifier(key):
                    return 'identifier: ' + key
            elif mi.is_null(key):
                return key

    def clean_downloaded_metadata(self, mi):
        '''
        Call this method in your plugin's identify method to normalize metadata
        before putting the Metadata object into result_queue. You can of
        course, use a custom algorithm suited to your metadata source.
        '''
        # Only re-case titles/tags for English (or unknown-language) books
        docase = mi.language == 'eng' or mi.is_null('language')
        if docase and mi.title:
            mi.title = fixcase(mi.title)
        mi.authors = fixauthors(mi.authors)
        if mi.tags and docase:
            mi.tags = list(map(fixcase, mi.tags))
        mi.isbn = check_isbn(mi.isbn)

    def download_multiple_covers(self, title, authors, urls, get_best_cover, timeout, result_queue, abort, log, prefs_name='max_covers'):
        '''
        Download the covers at urls concurrently (one daemon thread per URL),
        putting each result into result_queue. Returns when all downloads
        finish, abort is set, or timeout elapses, whichever comes first.
        '''
        if not urls:
            log('No images found for, title: %r and authors: %r'%(title, authors))
            return
        from threading import Thread
        import time
        if prefs_name:
            urls = urls[:self.prefs[prefs_name]]
        if get_best_cover:
            urls = urls[:1]
        log('Downloading %d covers'%len(urls))
        workers = [Thread(target=self.download_image, args=(u, timeout, log, result_queue)) for u in urls]
        for w in workers:
            w.daemon = True
            w.start()
        alive = True
        start_time = time.time()
        while alive and not abort.is_set() and time.time() - start_time < timeout:
            alive = False
            for w in workers:
                if w.is_alive():
                    alive = True
                    break
            abort.wait(0.1)

    def download_image(self, url, timeout, log, result_queue):
        '''
        Download a single cover image and put (self, data) into result_queue.
        Errors are logged, not raised, since this runs in a worker thread.
        '''
        try:
            ans = self.browser.open_novisit(url, timeout=timeout).read()
            result_queue.put((self, ans))
            log('Downloaded cover from: %s'%url)
        except Exception:
            # Use the log passed in by the caller: Source has no self.log
            # attribute, so self.log.exception here would itself raise
            # AttributeError inside this except block.
            log.exception('Failed to download cover from: %r'%url)
    # }}}

    # Metadata API {{{
    def get_book_url(self, identifiers):
        '''
        Return a 3-tuple or None. The 3-tuple is of the form:
        (identifier_type, identifier_value, URL).
        The URL is the URL for the book identified by identifiers at this
        source. identifier_type, identifier_value specify the identifier
        corresponding to the URL.
        This URL must be browseable to by a human using a browser. It is meant
        to provide a clickable link for the user to easily visit the books page
        at this source.
        If no URL is found, return None. This method must be quick, and
        consistent, so only implement it if it is possible to construct the URL
        from a known scheme given identifiers.
        '''
        return None

    def get_book_url_name(self, idtype, idval, url):
        '''
        Return a human readable name from the return value of get_book_url().
        '''
        return self.name

    def get_book_urls(self, identifiers):
        '''
        Override this method if you would like to return multiple urls for this book.
        Return a list of 3-tuples. By default this method simply calls :func:`get_book_url`.
        '''
        data = self.get_book_url(identifiers)
        if data is None:
            return ()
        return (data,)

    def get_cached_cover_url(self, identifiers):
        '''
        Return cached cover URL for the book identified by
        the identifiers dict or None if no such URL exists.
        Note that this method must only return validated URLs, i.e. not URLS
        that could result in a generic cover image or a not found error.
        '''
        return None

    def identify_results_keygen(self, title=None, authors=None,
            identifiers={}):
        '''
        Return a function that is used to generate a key that can sort Metadata
        objects by their relevance given a search query (title, authors,
        identifiers).
        These keys are used to sort the results of a call to :meth:`identify`.
        For details on the default algorithm see
        :class:`InternalMetadataCompareKeyGen`. Re-implement this function in
        your plugin if the default algorithm is not suitable.
        '''
        def keygen(mi):
            return InternalMetadataCompareKeyGen(mi, self, title, authors,
                identifiers)
        return keygen

    def identify(self, log, result_queue, abort, title=None, authors=None,
            identifiers={}, timeout=30):
        '''
        Identify a book by its title/author/isbn/etc.
        If identifiers(s) are specified and no match is found and this metadata
        source does not store all related identifiers (for example, all ISBNs
        of a book), this method should retry with just the title and author
        (assuming they were specified).
        If this metadata source also provides covers, the URL to the cover
        should be cached so that a subsequent call to the get covers API with
        the same ISBN/special identifier does not need to get the cover URL
        again. Use the caching API for this.
        Every Metadata object put into result_queue by this method must have a
        `source_relevance` attribute that is an integer indicating the order in
        which the results were returned by the metadata source for this query.
        This integer will be used by :meth:`compare_identify_results`. If the
        order is unimportant, set it to zero for every result.
        Make sure that any cover/isbn mapping information is cached before the
        Metadata object is put into result_queue.
        :param log: A log object, use it to output debugging information/errors
        :param result_queue: A result Queue, results should be put into it.
                             Each result is a Metadata object
        :param abort: If abort.is_set() returns True, abort further processing
                      and return as soon as possible
        :param title: The title of the book, can be None
        :param authors: A list of authors of the book, can be None
        :param identifiers: A dictionary of other identifiers, most commonly
                            {'isbn':'1234...'}
        :param timeout: Timeout in seconds, no network request should hang for
                        longer than timeout.
        :return: None if no errors occurred, otherwise a unicode representation
                 of the error suitable for showing to the user
        '''
        return None

    def download_cover(self, log, result_queue, abort,
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        '''
        Download a cover and put it into result_queue. The parameters all have
        the same meaning as for :meth:`identify`. Put (self, cover_data) into
        result_queue.
        This method should use cached cover URLs for efficiency whenever
        possible. When cached data is not present, most plugins simply call
        identify and use its results.
        If the parameter get_best_cover is True and this plugin can get
        multiple covers, it should only get the "best" one.
        '''
        pass
    # }}}
| jelly/calibre | src/calibre/ebooks/metadata/sources/base.py | Python | gpl-3.0 | 22,537 |
#!/usr/bin/python
# Smoke-test script for the shared GPS object exposed by pysharegps:
# connects to the shared object and prints position, altitude and the
# nearest known place. Python 2 (print statements).
from pysharegps import sharedGpsClient
import logging
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s %(message)s',level=logging.DEBUG)
    client = sharedGpsClient()
    if client.isInit():
        # position appears to be a 3-tuple; presumably (lat, lon, timestamp) -- TODO confirm
        position = client.getSharedObject().getPosition()
        print "latitude: "+str(position[0])+", "+str(position[1])+" ("+str(position[2])+")"
        altitude = client.getSharedObject().getAltitude()
        # NOTE(review): the trailing value printed here is position[2], not
        # altitude[2] -- confirm whether this reuse of the position timestamp
        # is intentional or a copy/paste slip.
        print "altitude: "+str(altitude[0])+str(altitude[1])+" ("+str(position[2])+")"
        # place: (name, dist?, unit?, bearing?, timestamp?) -- place[4] is None
        # when no place is known near the position; TODO confirm field meanings
        place = client.getSharedObject().getPlace()
        if place[4] == None:
            print "no defined place near this position"
        else:
            print "nearest place: "+place[0]+" at "+str(place[1])+str(place[2])+","+str(place[3])+" ("+str(place[4])+")"
    else:
        print "fail to init shared object"
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Dartlang module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.tools import add_env_to_user, MainLoop, get_current_arch, ChecksumType
from umake.ui import UI
logger = logging.getLogger(__name__)
_supported_archs = ['i386', 'amd64']
class DartCategory(umake.frameworks.BaseCategory):
    """Top-level umake category grouping the Dart-related frameworks."""

    def __init__(self):
        super().__init__(name="Dart",
                         description=_("Dartlang Development Environment"),
                         logo_path=None)
class DartLangEditorRemoval(umake.frameworks.baseinstaller.BaseInstaller):
    """Legacy entry kept only so that existing Dart Editor installs can be removed."""

    def __init__(self, **kwargs):
        super().__init__(name="Dart Editor",
                         description=_("Dart SDK with editor (not supported upstream anymore)"),
                         download_page=None,
                         only_on_archs=_supported_archs,
                         only_for_removal=True,
                         **kwargs)
class DartLang(umake.frameworks.baseinstaller.BaseInstaller):
    """Installer for the plain Dart SDK (the category default)."""

    def __init__(self, **kwargs):
        super().__init__(name="Dart SDK", description=_("Dart SDK (default)"), is_category_default=True,
                         only_on_archs=_supported_archs,
                         download_page="https://raw.githubusercontent.com/dart-lang/sdk/master/CHANGELOG.md",
                         dir_to_decompress_in_tarball="dart-sdk",
                         checksum_type=ChecksumType.sha256,
                         required_files_path=[os.path.join("bin", "dart")],
                         **kwargs)

    #: Map Debian architecture names to the names used by the Dart archive
    arch_trans = {
        "amd64": "x64",
        "i386": "ia32"
        # TODO: add arm
    }

    def parse_download_link(self, line, in_download):
        """Parse Dart SDK download links.

        A "## <version>" heading in the CHANGELOG marks a stable release.
        Only the URL of the .sha256sum file is stored here; the actual SDK
        download is started later by get_sha_and_start_download(), hence the
        (None, None) in the return value.
        """
        p = re.search(r"^##\s(\d\S+)", line)
        in_download = p is not None
        if in_download:
            with suppress(AttributeError):
                self.new_download_url = "https://storage.googleapis.com/dart-archive/channels/stable/" +\
                                        "release/{}/sdk/".format(p.group(1)) +\
                                        "dartsdk-linux-{}-release.zip".format(self.arch_trans[get_current_arch()]) +\
                                        ".sha256sum"
        return ((None, None), in_download)

    @MainLoop.in_mainloop_thread
    def get_sha_and_start_download(self, download_result):
        """Read the fetched .sha256sum file and start the real SDK download."""
        res = download_result[self.new_download_url]
        checksum = res.buffer.getvalue().decode('utf-8').split()[0]
        # The archive URL is the checksum URL minus its .sha256sum suffix.
        # Anchor and escape the pattern so '.' cannot match an arbitrary
        # character elsewhere in the URL.
        url = re.sub(r'\.sha256sum$', '', self.new_download_url)
        self.check_data_and_start_download(url, checksum)

    def post_install(self):
        """Add the Dart SDK bin directory to the user's PATH."""
        add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
        UI.delayed_display(DisplayMessage(self.RELOGIN_REQUIRE_MSG.format(self.name)))
class FlutterLang(umake.frameworks.baseinstaller.BaseInstaller):
    """Installer for the Flutter SDK."""

    def __init__(self, **kwargs):
        super().__init__(name="Flutter SDK",
                         description=_("Flutter SDK"),
                         download_page="https://api.flutter.dev/flutter/footer.js",
                         only_on_archs=_supported_archs,
                         dir_to_decompress_in_tarball="flutter",
                         required_files_path=[os.path.join("bin", "flutter")],
                         **kwargs)

    def parse_download_link(self, line, in_download):
        """Extract the Flutter release archive URL from a "Flutter <version>" line."""
        url = None
        match = re.search(r"Flutter\s(\S+)", line) if 'Flutter ' in line else None
        in_download = match is not None
        if in_download:
            with suppress(AttributeError):
                url = ("https://storage.googleapis.com/flutter_infra/releases/stable/linux/"
                       "flutter_linux_v{}-stable.tar.xz".format(match.group(1)))
        return ((url, None), in_download)

    def post_install(self):
        """Add the Flutter bin directory to the user's PATH."""
        bin_dir = os.path.join(self.install_path, "bin")
        add_env_to_user(self.name, {"PATH": {"value": bin_dir}})
        UI.delayed_display(DisplayMessage(self.RELOGIN_REQUIRE_MSG.format(self.name)))
| LyzardKing/ubuntu-make | umake/frameworks/dart.py | Python | gpl-3.0 | 5,057 |