repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
SLIT | SLIT-master/SLIT/Lens.py | import numpy as np
import matplotlib.pyplot as plt
import pyfits as pf
import scipy.signal as scp
import warnings
warnings.simplefilter("ignore")
#Tool box for lensing
def SIS(x0, y0, n1, n2, Re):
    """Convergence (kappa) map of a Singular Isothermal Sphere.

    Parameters
    ----------
    x0, y0 : int
        Pixel coordinates of the lens centre (must be valid integer indices,
        as in the original implementation, since the centre pixel is patched).
    n1, n2 : int
        Output map size in pixels.
    Re : float
        Einstein radius in pixels.

    Returns
    -------
    kappa : (n1, n2) ndarray
        Re / (2 r) evaluated at every pixel; the singular centre pixel is
        set to 1 by convention.
    """
    # PERF: evaluate the profile on the whole grid at once instead of a
    # Python loop over every pixel (same per-pixel arithmetic, so results
    # are identical).
    xx, yy = np.indices((n1, n2), dtype=float)
    with np.errstate(divide='ignore'):
        kappa = Re / (2 * np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2))
    # The profile diverges at the centre; cap it as the original did.
    if not np.isfinite(kappa[x0, y0]):
        kappa[x0, y0] = 1
    return kappa
def SIE_xy(x, y, x0, y0, b, beta, q, xc, theta):
    """Evaluate the Singular Isothermal Ellipsoid convergence at (x, y).

    x, y may be scalars or arrays; (x0, y0) is the profile centre, b the
    normalisation, beta the power-law slope, q the axis ratio, xc the core
    radius and theta the position angle in degrees.
    """
    ellipticity = (1 - q ** 2) / (1 + q ** 2)
    amplitude = b ** (beta - 1) / (2 * (1 - ellipticity) ** ((beta - 1) / 2))
    angle = theta * np.pi / 180.
    # Rotate coordinates into the ellipse frame.
    dx = x - x0
    dy = y - y0
    x_rot = dx * np.cos(angle) - dy * np.sin(angle)
    y_rot = dx * np.sin(angle) + dy * np.cos(angle)
    # Cored elliptical radius raised to the profile slope.
    radial2 = (xc ** 2.) / (1. - ellipticity) + x_rot ** 2. + (y_rot ** 2.) / q ** 2.
    return amplitude / radial2 ** ((beta - 1.) / 2.)
def SIE(x0, y0, n1, n2, b, beta, q, xc, theta):
    """Build an (n1, n2) SIE convergence map by evaluating SIE_xy on the
    integer pixel grid."""
    # np.indices yields the same row/column integer grids as the original
    # where/reshape construction.
    grid_x, grid_y = np.indices((n1, n2))
    return SIE_xy(grid_x, grid_y, x0, y0, b, beta, q, xc, theta)
def alpha_def(kappa, n1, n2, extra=0):
    """Deflection-angle maps (alpha_x, alpha_y) from a convergence map.

    The deflection is (1/pi) times the convolution of kappa with the
    x/r^2 (resp. y/r^2) kernel, evaluated by FFT and cropped to an
    (n1, n2) window centred on the kappa grid.

    Parameters
    ----------
    kappa : 2D ndarray
        Convergence map, typically padded wider than (n1, n2).
    n1, n2 : int
        Size of the returned image-plane window.
    extra : int
        Unused; kept for interface compatibility with callers.

    Returns
    -------
    (alpha_x, alpha_y) : pair of 2D ndarrays
    """
    nk1, nk2 = np.shape(kappa)
    # Pixel grid of the (possibly padded) convergence map.
    [x, y] = np.where(np.zeros([nk1, nk2]) == 0)
    # BUG FIX: use floor division — under Python 3, `/` yields floats and
    # breaks the integer slicing at the end of this function.
    x0 = nk1 // 2
    y0 = nk2 // 2
    xc = np.reshape(x - x0, (nk1, nk2))
    yc = np.reshape(y - y0, (nk1, nk2))
    r = xc ** 2 + yc ** 2
    lx, ly = np.where(r == 0)
    # BUG FIX: np.float_ was removed in NumPy 2.0; cast explicitly.
    # The kernel is singular at r = 0; suppress the warning and zero it.
    with np.errstate(divide='ignore', invalid='ignore'):
        tabx = xc.astype(float) / r
        taby = yc.astype(float) / r
    tabx[lx, ly] = 0
    taby[lx, ly] = 0
    kappa = kappa.astype(float)
    intex = scp.fftconvolve(tabx, kappa, mode='same') / np.pi
    intey = scp.fftconvolve(taby, kappa, mode='same') / np.pi
    # Crop back to the (n1, n2) window centred on (x0, y0).
    return (intex[x0 - n1 // 2:x0 + n1 // 2, y0 - n2 // 2:y0 + n2 // 2],
            intey[x0 - n1 // 2:x0 + n1 // 2, y0 - n2 // 2:y0 + n2 // 2])
def beta(kappa, theta):
    #Computes beta
    # Lens equation: maps an image-plane position `theta` to its source-plane
    # position, beta = theta - alpha(theta, kappa).
    # NOTE(review): `alpha` is not defined anywhere in this module (only
    # `alpha_def` exists, with a different signature), so calling this
    # function as written raises NameError — confirm which deflection
    # routine was intended before using it.
    beta = theta-alpha(theta,kappa)
    return beta
def theta(alpha, beta):
    """Image-plane position from the lens equation: theta = beta + alpha.

    Parameters
    ----------
    alpha : deflection angle (scalar or array).
    beta : source-plane position (scalar or array).

    Returns
    -------
    theta = beta + alpha.
    """
    theta = beta + alpha
    # BUG FIX: the original returned `beta` (the unchanged input) instead of
    # the computed `theta`.
    return theta
def F(kappa, nt1,nt2, size, extra=100, x_shear = 0, y_shear = 0, alpha_x_in = [-99], alpha_y_in = [-99]):
    # Builds the lens mapping: for every source-plane pixel, the list of
    # image-plane pixel coordinates whose photons arrive there.
    #
    # kappa:      2D convergence map (usually padded wider than nt1 x nt2).
    # nt1, nt2:   image-plane size in pixels.
    # size:       source/image resolution ratio (source grid is nt1*size x nt2*size).
    # extra:      padding, passed through to alpha_def.
    # x_shear, y_shear: constant shifts added to the deflection field.
    # alpha_x_in, alpha_y_in: optional precomputed deflection maps; the
    #             default [-99] is a sentinel meaning "not provided".
    # Returns F2: one entry per source pixel — either [0] (nothing maps
    #             there) or an integer array of image-pixel coordinates.
    # Theta positions for each pixel in beta
    # NOTE(review): scalar-vs-list comparison below works as a "was a
    # deflection map provided" test, but is fragile — confirm.
    if np.sum(alpha_x_in) != [-99]:
        alpha_x = alpha_x_in
        alpha_y = alpha_y_in
    else:
        nk1,nk2 = np.shape(kappa)
        alpha_x,alpha_y = alpha_def(kappa,nt1,nt2,extra = extra)
    # External shear contribution (assumed to apply in both branches above —
    # original indentation was lost, TODO confirm).
    alpha_x = alpha_x+x_shear
    alpha_y = alpha_y+y_shear
    na1,na2 = np.shape(alpha_x)
    xa,ya = np.where(np.zeros((na1,na2)) == 0)
    nb1=nt1*size
    nb2=nt2*size
    xb, yb = np.where(np.zeros((nb1,nb2))==0)
    #Scaling of the source grid
    #Scaling of the deflection grid
    # NOTE(review): np.float was removed in NumPy >= 1.24; these calls need
    # to become float(...) under modern NumPy.
    xa = xa*(np.float(nt1)/np.float(na1))#-0.68
    ya = ya*(np.float(nt2)/np.float(na2))#-0.68
    #Setting images coordinates in 2d
    xa2d = np.reshape(xa,(na1,na2))
    ya2d = np.reshape(ya,(na1,na2))
    F2 = []
    for i in range(np.size(xb)):
        #Deflection of photons emitted in xb[i],yb[i]
        theta_x = (xb[i])*(np.float(nt1)/np.float(nb1))+alpha_x
        theta_y = (yb[i])*(np.float(nt2)/np.float(nb2))+alpha_y
        #Matching of arrivals with pixels in image plane
        # Half-pixel proximity test: int(|dx|*2)+int(|dy|*2) < 1 keeps only
        # image pixels within half a pixel of the arrival position.
        xprox = np.int_(np.abs((xa2d-theta_x)*2))
        yprox = np.int_(np.abs((ya2d-theta_y)*2))
        if np.min(xprox+yprox) <1:
            loc2 = np.array(np.where((xprox+yprox)==np.min(xprox+yprox)))*np.float(nt1)/np.float(na1)#
        else:
            loc2 = []
        if (np.size(loc2)==0):
            # No image pixel maps onto this source pixel.
            F2.append([0])
        else:
            F2.append(np.int_(loc2))
    return F2
def source_to_image(Source, nt1, nt2, theta, ones=1):
    """Lens a source-plane image onto the (nt1, nt2) image plane.

    Source : 2D source-plane image.
    theta  : lens mapping as produced by F() — one coordinate entry per
             source pixel.
    ones   : when 1 (default), normalise by the map of a uniform source so
             multiply-hit image pixels are averaged rather than summed.
    """
    mapping = theta
    ns1, ns2 = np.shape(Source)
    if ones == 1:
        # Normalisation map: lens an all-ones source, guard against
        # division by zero in never-hit pixels.
        norm = source_to_image(np.ones(Source.shape), nt1, nt2, theta, ones=0)
        norm[norm == 0] = 1
    else:
        norm = 1.
    Image = np.zeros((nt1, nt2))
    xs, ys = np.where(np.zeros((ns1, ns2)) == 0)
    for k, pos in enumerate(mapping):
        # Entries of shape (1,) are the [0] sentinel: nothing maps there.
        if np.size(np.shape(pos)) != 1:
            Image[np.array(pos[0][:]), np.array(pos[1][:])] += Source[xs[k], ys[k]]
    return Image / norm
def image_to_source(Image, size, beta, lensed=0, square=0):
    """Project an image-plane map back onto the source plane.

    Image  : 2D image-plane map.
    size   : source/image resolution ratio (source is nt1*size x nt2*size).
    beta   : lens mapping as produced by F().
    lensed : when non-zero, average the contributing image pixels instead of
             summing them.
    square : when 1 (with lensed), accumulate squared contributions and
             return the square root of the result (noise propagation).
    """
    mapping = beta
    nt1, nt2 = np.shape(Image)
    ns1 = nt1 * size
    ns2 = nt2 * size
    Source = np.zeros((ns1, ns2))
    xs, ys = np.where(Source == 0)
    for k in range(np.size(xs)):
        pos = mapping[k]
        # Shape-(1,) entries are the [0] sentinel: no image pixel maps here.
        if np.size(np.shape(pos)) <= 1:
            continue
        pix = Image[np.array(pos[0][:]), np.array(pos[1][:])]
        hits = np.max([1, np.size(pos[0][:])])
        if np.sum(lensed) != 0:
            if square == 0:
                Source[xs[k], ys[k]] += np.sum(pix) / hits
            else:
                Source[xs[k], ys[k]] += np.sum((pix / hits) ** 2)
        else:
            Source[xs[k], ys[k]] += np.sum(pix)
    if square == 1:
        Source = np.sqrt(Source)
    return Source
def image_to_source_bound(Image, size, beta, lensed=0):
    """Project an image-plane map back onto the source plane (no `square`
    option; otherwise identical to image_to_source).

    Image  : 2D image-plane map.
    size   : source/image resolution ratio.
    beta   : lens mapping as produced by F().
    lensed : when non-zero, average the contributing image pixels instead of
             summing them.
    """
    mapping = beta
    nt1, nt2 = np.shape(Image)
    ns1 = nt1 * size
    ns2 = nt2 * size
    Source = np.zeros((ns1, ns2))
    xs, ys = np.where(Source == 0)
    for k in range(np.size(xs)):
        pos = mapping[k]
        # Shape-(1,) entries are the [0] sentinel: no image pixel maps here.
        if np.size(np.shape(pos)) <= 1:
            continue
        pix = Image[np.array(pos[0][:]), np.array(pos[1][:])]
        if np.sum(lensed) != 0:
            Source[xs[k], ys[k]] += np.sum(pix) / np.max([1, np.size(pos[0][:])])
        else:
            Source[xs[k], ys[k]] += np.sum(pix)
    return Source
| 6,888 | 28.440171 | 123 | py |
SLIT | SLIT-master/SLIT/__init__.py | from Solve import *
import Lens
import wave_transform
import tools
| 67 | 12.6 | 21 | py |
SLIT | SLIT-master/Examples/Test_SLIT_forUsers.py | import pyfits as pf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from scipy import signal as scp
import SLIT
import time
from scipy import signal as scp
import warnings
warnings.simplefilter("ignore")
#Example of a run of the SLIT algorithm on simulated images.
#Here the first part of the file shows how simulations are generated.
#For users intereseted only in seeing the code run, have a look at the running SLIT section.
#The command line that builds the Fkappa operator is also of outmost importance.
Image = '''Input 2D image of the lens to invert '''
nt1,nt2 = np.shape(Image)
###############################Mass profile###############################
x0,y0 = '''Input the center of mass of the lens with regard to coodinates in Image '''
kappa = '''Input dimensionless pixelated mass density profile here '''
size = '''Input the desired size of the output with regard to Image. Chosing 1 will result in a source with the same number of pixels as in Image. '''
#Mapping between lens and source IMPORTANT
Fkappa = SLIT.Lens.F(kappa, nt1,nt2, size,x0,y0)
PSF = '''Input the PSF for Image here'''
PSFconj = '''Input the conjugate of the PSF here given by
np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1])))), but be carefull that the result is still centered '''
################################Running SLIT############################
#Parameters
kmax = 5
niter =50
#Start clock
start = time.clock()
#Running SLIT
S, FS = SLIT.SLIT(Image, Fkappa, kmax, niter, size, PSF, PSFconj)
#Stop clock
elapsed = (time.clock()-start)
print('execution time:', elapsed, 'seconds')
#Reconstruction goodness
real_source = newsource
source_error = np.sum(np.abs(real_source[np.where(real_source!=0)]
-S[np.where(real_source!=0)])**2
/real_source[np.where(real_source!=0)]**2)/(np.size(
np.where(real_source!=0))/2.)
image_chi2 = np.std(Image-FS)**2/sigma**2
print('Residuals in source space', source_error)
print('Residuals in image space',image_chi2)
#Display of results
for i in [1]:
plt.figure(2)
# plt.suptitle('FISTA: error per pixel on the source: '+str(source_error)+' image chi2:'+str(image_chi2))
# plt.subplot(2,3,1)
plt.title('Source from SLIT')
plt.imshow((S), vmin = np.min(real_source), vmax = np.max(real_source),cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,2)
plt.figure(3)
plt.title('Original source')
plt.imshow(real_source, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,3)
plt.figure(4)
plt.title('Lensed source')
plt.imshow(Image, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(41)
plt.title('Reconstructed lensed source')
plt.imshow(FS, vmin = np.min(Image), vmax = np.max(Image), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,4)
plt.figure(5)
plt.title('relative difference')
diff = (real_source-S)/real_source
diff[np.where(real_source==0)] = 0
diff[np.where(diff>1)]= np.log(0.)
plt.imshow(np.abs(diff), vmax = 1., vmin = 0., cmap = cm.gist_stern, interpolation = 'nearest' )
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,5)
plt.figure(6)
plt.title('difference with reconstructed lensed image')
plt.imshow(Image-FS, vmin = -5*sigma, vmax = 5*sigma, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,6)
plt.figure(7)
plt.title('difference with true source')
plt.imshow((np.abs(real_source-S)), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.show()
| 3,928 | 37.145631 | 150 | py |
SLIT | SLIT-master/Examples/Test_SLIT_MCA.py | from SLIT import Lens
import pyfits as pf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from scipy import signal as scp
import SLIT as slit
import time
from scipy import signal as scp
import warnings
warnings.simplefilter("ignore")
#Example of a run of the SLIT_MCA algorithm on simulated images.
#Here the first part of the file shows how simulations are generated.
#For users intereseted only in seeing the code run, have a look at the running SLIT_MCA section.
#The command line that builds the Fkappa operator is also of outmost importance.
###############################Simulation###############################
def SIE(x0, y0, n1, n2, b, beta, q, xc, theta):
    """Singular Isothermal Ellipsoid convergence map on an (n1, n2) grid.

    (x0, y0) is the profile centre in pixels, b the normalisation, beta the
    power-law slope, q the axis ratio, xc the core radius and theta the
    position angle in degrees.
    """
    eps = (1 - q ** 2) / (1 + q ** 2)
    pre = b ** (beta - 1) / (2 * (1 - eps) ** ((beta - 1) / 2))
    theta = theta * np.pi / 180.
    # PERF: evaluate on the whole pixel grid at once instead of a Python
    # loop over every pixel; the per-pixel arithmetic is unchanged.
    x, y = np.indices((n1, n2))
    Xr = (x - x0) * np.cos(theta) - (y - y0) * np.sin(theta)
    Yr = (x - x0) * np.sin(theta) + (y - y0) * np.cos(theta)
    kappa = pre / ((xc ** 2.) / (1. - eps) + Xr ** 2. + (Yr ** 2.) / q ** 2.) ** ((beta - 1.) / 2.)
    return kappa
#Source light profile
newsource = pf.open('../Files/source.fits')[0].data
##N1,N2 are the numbers of pixels in the image plane.
nt1= 100
nt2 = 100
#Size ratio of the source to image number of pixels
size = 1
#PSF
PSF0 = pf.open('../Files/PSF.fits')[0].data
PSF = PSF0[1:,1:]
PSFconj = np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
PSFconj=PSFconj/np.sum(PSFconj)
PSF = PSF/np.sum(PSF)
## Lens mass distribution.
b = 1.53/0.05
xc = 0.95
q = 0.71
betata = 2.1
thetata = 25.2
kappa = SIE(nt1/2.+50,nt2/2.+50,nt1+100,nt2+100,b,betata,q,xc, thetata)
#Mapping between lens and source IMPORTANT
Fkappa = slit.Lens.F(kappa, nt1,nt2, size,nt1/2.,nt2/2.)
#Lens galaxy light profile
gal0 = pf.open('../Files/Galaxy.fits')[0].data
#Generation of lensed source
I2 = slit.Lens.source_to_image(newsource, nt1 ,nt2 , Fkappa)
HI2 = scp.fftconvolve(I2, PSF.astype(float), mode = 'same')
#Noise levels
SNR = 500
sigma = np.sqrt(np.sum(I2**2)/SNR/(nt1*nt2*size**2))
#Convolution of the observed image
simu = scp.fftconvolve(gal0.astype(float)+I2, PSF.astype(float), mode = 'same')
#Sotring the convolved lens light profile:
gal = scp.fftconvolve(gal0.astype(float), PSF.astype(float), mode = 'same')
#Final simulated image
Image = simu+np.random.randn(nt1,nt2)*sigma
lensed = slit.lens_one(Fkappa, nt1,nt2, size)
Test = slit.Lens.image_to_source(I2+gal, size, Fkappa, lensed = lensed)
plt.imshow(Image, cmap = cm.gist_stern, interpolation = 'nearest')
plt.show()
plt.imshow(Test, cmap = cm.gist_stern, interpolation = 'nearest')
plt.show()
hdus = pf.PrimaryHDU(Image)
lists = pf.HDUList([hdus])
lists.writeto('Image.fits', clobber=True)
################################Running SLIT############################
#Parameters
kmax = 5
niter =100
riter =100
levels = [0]
#Comment the following to have the level estimation routine run (takes more time)
levels = pf.open('../Files/Noise_levels_SLIT_MCA.fits')[0].data
#Start clock
start = time.clock()
#Running SLIT_MCA
S, FS, G = slit.SLIT_MCA(Image, Fkappa, kmax, niter,riter, size,PSF, PSFconj, levels = levels)
#Stop clock
elapsed = (time.clock()-start)
print('execution time:', elapsed, 'seconds')
real_source = newsource
source_error = np.sum(np.abs(real_source[np.where(real_source!=0)]
-S[np.where(real_source!=0)])**2
/real_source[np.where(real_source!=0)]**2)/(np.size(
np.where(real_source!=0))/2.)
image_chi2 = np.std(Image-FS-G)**2/sigma**2
print('Residuals in source space', source_error)
print('Residuals in image space',image_chi2)
for i in [1]:
###Source
plt.figure(0)
plt.title('Source from SLIT')
plt.imshow((S), vmin = np.min(real_source), vmax = np.max(real_source), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(1)
plt.title('Original image of the source')
plt.imshow(real_source, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(2)
plt.title('relative difference')
diff = (real_source-S)
plt.imshow((np.abs(diff)), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
####Lensed source
plt.figure(3)
plt.title('Original lensed galaxy')
plt.imshow(HI2, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(4)
plt.title('reconstructed lensed source')
plt.imshow((FS), vmin = np.min(I2), vmax = np.max(I2), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(5)
plt.title('error on the source in image plane')
plt.imshow((HI2-FS), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
###Galaxy
plt.figure(6)
plt.title('Original galaxy')
plt.imshow((gal0), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(12)
plt.title('Estimated Galaxy')
plt.imshow((G), vmin = np.min(gal0), vmax = np.max(gal0), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(7)
plt.title('Error on the galaxy')
plt.imshow((gal-G), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
###Image
plt.figure(8)
plt.title('Image')
plt.imshow(Image, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(9)
plt.title('Reconstructed image')
plt.imshow(FS+G, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(10)
plt.title('difference with reconstructed image')
plt.imshow(Image-FS-G,cmap = cm.gist_stern, interpolation = 'nearest', vmin = -5*sigma, vmax = 5*sigma)#slit.fft_convolve(Im,PSF)
plt.axis('off')
plt.colorbar()
plt.show()
| 6,169 | 29.544554 | 133 | py |
SLIT | SLIT-master/Examples/Test_SLIT.py | import pyfits as pf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
import SLIT
import time
from scipy import signal as scp
import warnings
warnings.simplefilter("ignore")
#Example of a run of the SLIT algorithm on simulated images.
#Here the first part of the file shows how simulations are generated.
#For users intereseted only in seeing the code run, have a look at the running SLIT section.
#The command line that builds the Fkappa operator is also of outmost importance.
###############################Simulation###############################
def SIE(x0, y0, n1, n2, b, beta, q, xc, theta):
    """Singular Isothermal Ellipsoid convergence map on an (n1, n2) grid.

    (x0, y0) is the profile centre in pixels, b the normalisation, beta the
    power-law slope, q the axis ratio, xc the core radius and theta the
    position angle in degrees.
    """
    eps = (1 - q ** 2) / (1 + q ** 2)
    pre = b ** (beta - 1) / (2 * (1 - eps) ** ((beta - 1) / 2))
    theta = theta * np.pi / 180.
    # PERF: evaluate on the whole pixel grid at once instead of a Python
    # loop over every pixel; the per-pixel arithmetic is unchanged.
    x, y = np.indices((n1, n2))
    Xr = (x - x0) * np.cos(theta) - (y - y0) * np.sin(theta)
    Yr = (x - x0) * np.sin(theta) + (y - y0) * np.cos(theta)
    kappa = pre / ((xc ** 2.) / (1. - eps) + Xr ** 2. + (Yr ** 2.) / q ** 2.) ** ((beta - 1.) / 2.)
    return kappa
#Source light profile
newsource = pf.open('../Files/source.fits')[0].data
##N1,N2 are the numbers of pixels in the image plane.
nt1= 100
nt2 = 100
#Size ratio of the source to image number of pixels
size = 1
#PSF
PSF0 = pf.open('../Files/PSF.fits')[0].data
PSF = PSF0[1:,1:]
PSFconj = np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
PSFconj=PSFconj/np.sum(PSFconj)
PSF = PSF/np.sum(PSF)
## Lens mass distribution.
b = 1.53/0.05
xc = 0.95
q = 0.71
betata = 2.1
thetata = 25.2
kappa = SIE(nt1/2.+50,nt2/2.+50,nt1+100,nt2+100,b,betata,q,xc, thetata)
#Mapping between lens and source IMPORTANT
Fkappa = SLIT.Lens.F(kappa, nt1,nt2, size,nt1/2.,nt2/2.)
#Generation of lensed source
I2 = SLIT.Lens.source_to_image(newsource, nt1 ,nt2 , Fkappa)
#Noise levels
SNR = 500
sigma = np.sqrt(np.sum(I2**2)/SNR/(nt1*nt2*size**2))
#Convolution by the PSF and generation of the final image
I2 = scp.fftconvolve(I2, PSF, mode = 'same')
#Final simulated image
Image = I2+np.random.randn(nt1,nt2)*sigma
################################Running SLIT############################
#Parameters
kmax = 5
niter =100
levels = [0]
#Comment the following to have the level estimation routine run (takes more time)
levels = pf.open('../Files/Noise_levels_SLIT.fits')[0].data
#Start clock
start = time.clock()
#Running SLIT
sourcesl, Imsl = SLIT.SLIT(Image, Fkappa, kmax, niter, size, PSF, PSFconj, levels = levels, fb =1)
#Stop clock
elapsed = (time.clock()-start)
print('execution time:', elapsed, 'seconds')
#Reconstruction goodness
real_source = newsource
source_error = np.sum(np.abs(real_source[np.where(real_source!=0)]
-sourcesl[np.where(real_source!=0)])**2
/real_source[np.where(real_source!=0)]**2)/(np.size(
np.where(real_source!=0))/2.)
image_chi2 = np.std(Image-Imsl)**2/sigma**2
print('Residuals in source space', source_error)
print('Residuals in image space',image_chi2)
#Display of results
for i in [1]:
plt.figure(2)
# plt.suptitle('FISTA: error per pixel on the source: '+str(source_error)+' image chi2:'+str(image_chi2))
# plt.subplot(2,3,1)
plt.title('Source from SLIT')
plt.imshow((sourcesl), vmin = np.min(real_source), vmax = np.max(real_source),cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,2)
plt.figure(3)
plt.title('Original source')
plt.imshow(real_source, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,3)
plt.figure(4)
plt.title('Lensed source')
plt.imshow(Image, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(41)
plt.title('Reconstructed lensed source')
plt.imshow(Imsl, vmin = np.min(Image), vmax = np.max(Image), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,4)
plt.figure(5)
plt.title('relative difference')
diff = (real_source-sourcesl)/real_source
diff[np.where(real_source==0)] = 0
diff[np.where(diff>1)]= np.log(0.)
plt.imshow(np.abs(diff), vmax = 1., vmin = 0., cmap = cm.gist_stern, interpolation = 'nearest' )
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,5)
plt.figure(6)
plt.title('difference with reconstructed lensed image')
plt.imshow(Image-Imsl, vmin = -5*sigma, vmax = 5*sigma, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,6)
plt.figure(7)
plt.title('difference with true source')
plt.imshow((np.abs(real_source-sourcesl)), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.show()
| 4,935 | 31.473684 | 130 | py |
SLIT | SLIT-master/Examples/Test_SLIT_HR.py | import pyfits as pf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from scipy import signal as scp
import SLIT
import time
from scipy import signal as scp
import warnings
warnings.simplefilter("ignore")
#Example of a run of the SLIT algorithm on simulated images.
#Here the first part of the file shows how simulations are generated.
#For users intereseted only in seeing the code run, have a look at the running SLIT section.
#The command line that builds the Fkappa operator is also of outmost importance.
###############################Simulation###############################
def SIE(x0, y0, n1, n2, b, beta, q, xc, theta):
    """Singular Isothermal Ellipsoid convergence map on an (n1, n2) grid.

    (x0, y0) is the profile centre in pixels, b the normalisation, beta the
    power-law slope, q the axis ratio, xc the core radius and theta the
    position angle in degrees.
    """
    eps = (1 - q ** 2) / (1 + q ** 2)
    pre = b ** (beta - 1) / (2 * (1 - eps) ** ((beta - 1) / 2))
    theta = theta * np.pi / 180.
    # PERF: evaluate on the whole pixel grid at once instead of a Python
    # loop over every pixel; the per-pixel arithmetic is unchanged.
    x, y = np.indices((n1, n2))
    Xr = (x - x0) * np.cos(theta) - (y - y0) * np.sin(theta)
    Yr = (x - x0) * np.sin(theta) + (y - y0) * np.cos(theta)
    kappa = pre / ((xc ** 2.) / (1. - eps) + Xr ** 2. + (Yr ** 2.) / q ** 2.) ** ((beta - 1.) / 2.)
    return kappa
#Source light profile
newsource = pf.open('../Files/Source_HR.fits')[0].data
##N1,N2 are the numbers of pixels in the image plane.
nt1= 100
nt2 = 100
#Size ratio of the source to image number of pixels
size = 2
#PSF
PSF0 = pf.open('../Files/PSF.fits')[0].data
PSF = PSF0[1:,1:]
PSFconj = np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
PSFconj=PSFconj/np.sum(PSFconj)
PSF = PSF/np.sum(PSF)
## Lens mass distribution.
b = 1.53/0.05
xc = 0.95
q = 0.71
betata = 2.1
thetata = 25.2
kappa = SIE(nt1/2.+50,nt2/2.+50,nt1+100,nt2+100,b,betata,q,xc, thetata)
#Mapping between lens and source IMPORTANT
Fkappa = SLIT.Lens.F(kappa, nt1,nt2, size,nt1/2.,nt2/2.)
#Generation of lensed source
I2 = SLIT.Lens.source_to_image(newsource, nt1 ,nt2 , Fkappa)
#Noise levels
SNR = 500
sigma = np.sqrt(np.sum(I2**2)/SNR/(nt1*nt2*size**2))
#Convolution by the PSF and generation of the final image
I2 = scp.fftconvolve(I2, PSF, mode = 'same')
#Final simulated image
Image = I2+np.random.randn(nt1,nt2)*sigma
################################Running SLIT############################
#Parameters
kmax = 5
niter =50
levels = [0]
#Comment the following to have the level estimation routine run (takes more time)
levels = pf.open('../Files/Noise_levels_SLIT_HR.fits')[0].data
#Start clock
start = time.clock()
#Running SLIT
sourcesl, Imsl = SLIT.SLIT(Image, Fkappa, kmax, niter, size, PSF, PSFconj, levels = levels)
#Stop clock
elapsed = (time.clock()-start)
print('execution time:', elapsed, 'seconds')
#Reconstruction goodness
real_source = newsource
source_error = np.sum(np.abs(real_source[np.where(real_source!=0)]
-sourcesl[np.where(real_source!=0)])**2
/real_source[np.where(real_source!=0)]**2)/(np.size(
np.where(real_source!=0))/2.)
image_chi2 = np.std(Image-Imsl)**2/sigma**2
print('Residuals in source space', source_error)
print('Residuals in image space',image_chi2)
#Display of results
for i in [1]:
plt.figure(2)
# plt.suptitle('FISTA: error per pixel on the source: '+str(source_error)+' image chi2:'+str(image_chi2))
# plt.subplot(2,3,1)
plt.title('Source from SLIT')
plt.imshow((sourcesl), vmin = np.min(real_source), vmax = np.max(real_source),cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,2)
plt.figure(3)
plt.title('Original source')
plt.imshow(real_source, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,3)
plt.figure(4)
plt.title('Lensed source')
plt.imshow(Image, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.figure(41)
plt.title('Reconstructed lensed source')
plt.imshow(Imsl, vmin = np.min(Image), vmax = np.max(Image), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,4)
plt.figure(5)
plt.title('relative difference')
diff = (real_source-sourcesl)/real_source
diff[np.where(real_source==0)] = 0
diff[np.where(diff>1)]= np.log(0.)
plt.imshow(np.abs(diff), vmax = 1., vmin = 0., cmap = cm.gist_stern, interpolation = 'nearest' )
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,5)
plt.figure(6)
plt.title('difference with reconstructed lensed image')
plt.imshow(Image-Imsl, vmin = -5*sigma, vmax = 5*sigma, cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
# plt.subplot(2,3,6)
plt.figure(7)
plt.title('difference with true source')
plt.imshow((np.abs(real_source-sourcesl)), cmap = cm.gist_stern, interpolation = 'nearest')
plt.axis('off')
plt.colorbar()
plt.show()
| 4,965 | 31.457516 | 130 | py |
SLIT | SLIT-master/Examples/Test_sparsity.py | from SLIT import Lens
import pyfits as pf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from scipy import signal as scp
from SLIT import wave_transform as mw
import time
from scipy import signal as scp
import SLIT as slit
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import warnings
warnings.simplefilter("ignore")
S = pf.open('../Files/source.fits')[0].data
G = pf.open('../Files/Galaxy.fits')[0].data
##Sizes in image and source planes
nt1= 100
nt2 = 100
size = 1
#Mass profile of the lens
kappa = pf.open('../Files/kappa.fits')[0].data
Fkappa = Lens.F(kappa, nt1,nt2, size,nt1/2.,nt2/2.)
lensed = slit.lens_one(Fkappa, nt1,nt2, size)
#Levels for normalisation
lev = slit.level(nt1,nt1)
#Starlet transforms of the lens and source in their respective planes
wG = mw.wave_transform(G, lvl = 6, newwave =1)/lev
wS = mw.wave_transform(S, lvl = 6, newwave =1)/lev
#Lensed source
FS = Lens.source_to_image(S, nt1, nt2,Fkappa)
#Unlensed lens
FG = Lens.image_to_source(G, size, Fkappa, lensed=lensed)
#Starlet transform of the unlensed lens
wFG = mw.wave_transform(FG, 6, newwave =1)/lev
#Starlet transform of the lensed
wFS = mw.wave_transform(FS, 6, newwave =1)/lev
def mk_sort(X):
    """Return the absolute values of X, flattened and sorted in decreasing
    order."""
    magnitudes = np.sort(np.abs(np.asarray(X)).ravel())
    return magnitudes[::-1]
#Function that computes the reconstruction error from the p% highest coefficients
def error_rec_from(X, p, wave=0):
    """Squared reconstruction error when keeping only the largest
    coefficients of X.

    p is a per-mille fraction: the p/1000 largest coefficients (in absolute
    value) are kept, all others are zeroed, and the squared residual is
    summed.  When wave == 1 the residual is measured after an inverse
    starlet transform (mw.iuwt).
    """
    Xcopy = np.copy(X)
    Y = mk_sort(X)
    # BUG FIX: the index must be an integer — under Python 3,
    # p*Y.size/1000. is a float and raises TypeError when used as an index.
    ymin = Y[int(p * Y.size / 1000.)]
    Xcopy[np.abs(X) < ymin] = 0
    if wave == 1:
        err = (mw.iuwt(Xcopy) - mw.iuwt(X)) ** 2
    else:
        err = (Xcopy - X) ** 2
    return np.sum(err)
#Computation of reconstruction errors for each light profile
error_wS = np.zeros(1000)
error_S = np.zeros(1000)
error_wFS = np.zeros(1000)
error_G = np.zeros(1000)
error_wG = np.zeros(1000)
error_wFG = np.zeros(1000)
for i in np.linspace(0,999, 1000):
error_wS[i] = error_rec_from(wS, i, wave = 1)
error_S[i] = error_rec_from(S, i)
error_wFS[i] = error_rec_from(wFS, i, wave = 1)
error_G[i] = error_rec_from(G, i)
error_wG[i] = error_rec_from(wG, i, wave = 1)
error_wFG[i] = error_rec_from(wFG, i, wave = 1)
print('NLA on the source at 10%: ',error_wS[100]/np.max(error_wS))
print('NLA on the lens at 10%: ', error_wG[100]/np.max(error_wG))
print('NLA on the lensed source at 10%: ', error_wFS[100]/np.max(error_wFS))
print('NLA on the delensed lens at 10%: ', error_wFG[100]/np.max(error_wFG))
#Display
plt.figure(1)
plt.plot(np.linspace(0,100, 1000), error_wS/np.max(error_wS), 'r', label = 'Source in starlet space', linewidth = 3)
plt.plot(np.linspace(0,100, 1000), error_wFG/np.max(error_wFG), 'c', label = 'Lens in source plane in starlet space', linewidth = 3)
plt.xlabel('percentage of coefficients used in reconstruction', fontsize=25)
plt.ylabel('Error on reconstruction', fontsize=25)
plt.title('Non-linear approximation error in source plane', fontsize=25)
plt.legend(fontsize = 25)
a = plt.axes([0.4, 0.2, 0.45, 0.4])
plt.semilogy(np.linspace(0,100, 1000), (error_wFG/np.max(error_wFG)), 'c', linewidth = 3)
plt.semilogy(np.linspace(0,100, 1000), error_wS/np.max(error_wS), 'r', linewidth = 3)
plt.xlim(20,100)
plt.figure(2)
plt.plot(np.linspace(0,100, 1000), error_wG/np.max(error_wG), 'b', label = 'Galaxy in starlet space', linewidth = 3)
plt.plot(np.linspace(0,100, 1000), error_wFS/np.max(error_wFS), 'm', label = 'Lensed source in starlet space', linewidth = 3)
plt.xlabel('percentage of coefficients used in reconstruction', fontsize=25)
plt.ylabel('Error on reconstruction', fontsize=25)
plt.title('Non-linear approximation error in lens plane', fontsize=25)
plt.legend(fontsize = 25)
a = plt.axes([0.4, 0.2, 0.45, 0.4])
plt.semilogy(np.linspace(0,100, 1000), (error_wFS/np.max(error_wFS)), 'm', linewidth = 3)
plt.semilogy(np.linspace(0,100, 1000), error_wG/np.max(error_wG), 'b', linewidth = 3)
plt.xlim(20,100)
plt.show()
| 4,021 | 33.672414 | 132 | py |
SLIT | SLIT-master/build/lib/SLIT/Solve.py | #from __future__ import division
import wave_transform as mw
import numpy as np
import matplotlib.pyplot as plt
import pyfits as pf
import matplotlib.cm as cm
from scipy import signal as scp
import scipy.ndimage.filters as med
import MuSCADeT as wine
from numpy import linalg as LA
import multiprocess as mtp
from pathos.multiprocessing import ProcessingPool as Pool
import Lens
import warnings
import tools
warnings.simplefilter("ignore")
##SLIT: Sparse Lens Inversion Technique
def SLIT(Y, Fkappa, kmax, niter, size, PSF, PSFconj, S0 = [0], levels = [0], scheme = 'FB',
mask = [0], lvl = 0, weightS = 1, noise = 'gaussian', tau = 0):
##DESCRIPTION:
## Function that estimates the source light profile from an image of a lensed source given the mass density profile.
##
##INPUTS:
## -img: a 2-D image of a lensed source given as n1xn2 numpy array.
## -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
## using tools from SLIT.Lens
## -kmax: the detection threshold in units of noise levels. We usualy set this value to 5 to get a 5 sigma
## detection threshold.
## -niter: maximal number of iterations of the algorithm.
## -size: resoluution factor between lens and source grids such thathe size of the output source
## will be n1sizexn2size
## -PSF: the point spread function of the observation provided as a 2D array.
## -PSFconj: The conjugate of the PSF. Usually computed via np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
## butthe user has to make sure that the conjugate is well centered.
##
##OPTIONS:
## -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
## If not provided, the routine will compute the levels and save them in a fits file 'Noise_levels.fits'
## so that they can be used at a later time. This option allows to save time when running the same
## experiment several times.
## -mask: an array of zeros and one with size ns1xns2. The zeros will stand for masked data.
##
##OUTPUTS:
## -S: the source light profile.
## -FS: the lensed version of the estimated source light profile
##
##EXAMPLE:
## S,FS = SLIT(img, Fkappa, 5, 100, 1, PSF, PSFconj)
n1,n2 = np.shape(Y)
# PSFconj = np.rot90(PSF, 2)
#Size of the source
ns1,ns2 = n1*size, n2*size
#Number of starlet scales in source plane
if lvl ==0:
lvl = np.int(np.log2(ns2))
else:
lvl = np.min([lvl,np.int(np.log2(ns2))])
#Masking if required
if np.sum(mask) == 0:
mask = np.ones((n1,n2))
img = Y*mask
#Noise in image plane
sigma0 = MAD(Y)
if noise == 'poisson':
if tau ==0:
print('error: Give exposure time')
Y0 =np.copy(Y)
sigma = np.copy(sigma0)
Y = 2./tau*np.sqrt(tau*np.abs(Y)+tau*3./8.+sigma0)*np.sign(tau*Y+tau*3./8.+sigma0)
sigma0 = MAD(Y)
#Mapping of an all-at-one image to source plane
lensed = lens_one(Fkappa, n1,n2, size)
#estimation of the frame of the image in source plane
supp = np.zeros((lvl,lensed.shape[0],lensed.shape[1]))
supp[:,lensed/lensed ==1] =1
#Useful functions
def Finv_apply(I):
return Lens.image_to_source(I, size, Fkappa, lensed = lensed)
def Lens_op2(I):
return Lens.image_to_source(I, size, Fkappa, lensed = lensed, square = 1)
def F_apply(Si):
return Lens.source_to_image(Si, n1, n2,Fkappa)
def PSF_apply(i):
return scp.fftconvolve(i,PSF,mode = 'same')
def PSFT_apply(ii):
return scp.fftconvolve(ii,PSFconj,mode = 'same')
def transform(x):
return tools.wave_transform(x, lvl, newwave = 1)
def inverse(x):
return tools.iuwt(x)
#Forward operator
def F_op(X):
return PSF_apply(F_apply(X))
#Inverse operator
def I_op(X):
return Finv_apply(PSFT_apply(X))
#Regularisation (Backward term)
def reg0(X):
return tools.Hard(X, transform, inverse,levels, (ks), supp=supp)
def reg00(X):
return tools.Hard_Threshold(X, transform, inverse,levels, (ks), supp=supp)
def reg1(X):
return tools.Soft(X, transform, inverse,levels*weightS, kmax, supp=supp)
def reg_filter(X):
return tools.mr_filter(X,levels,ks,20,transform, inverse, lvl = lvl)
#Noise simulations to estimate noise levels in source plane
if np.sum(levels)==0:
print('Calculating noise levels')
#levels = simulate_noise(n1,n2, sigma0, size, I_op, transform, lvl)
levels = level_source(n1,n2,sigma0,size,PSFconj, Lens_op2, lensed, lvl)
#Saves levels
hdus = pf.PrimaryHDU(levels)
lists = pf.HDUList([hdus])
lists.writeto('Noise_levels.fits', clobber=True)
##Compute spectral norms
op_norm = spectralNorm(ns1,ns2,20,1e-10,F_op,I_op)
wave_norm = spectralNorm(ns1,ns2,20,1e-10,transform,inverse)
nu = 0.5#op_norm**2/(2*wave_norm**2)-1./(mu)
mu = 1./(op_norm+wave_norm)
if scheme == 'FB':
repeat =1
else:
repeat = 2
#Initialisation
Res1= []
tau = 0.5
for jr in range(repeat):
trans = (transform(I_op(Y))/levels)[:-1,:,:]
ks0 = np.max(trans[levels[:-1,:,:]!=0])
ks=np.copy(ks0)
steps = (ks0-kmax)/(niter-5)
karg = np.log(kmax/ks0)/(niter-5.)
i = 0
if np.sum(S0) == 0:
S=np.random.randn(ns1,ns2)*np.median(sigma0)*0
else:
S = S0
ts = 1
csi = 0
Res1= []
Res2 = []
alpha =transform(S)
while i < niter:
print(i)
if scheme == 'FB':
ks = ks0*np.exp(i*karg)
ks = np.max([ks, kmax])
S = tools.Forward_Backward(Y, S, F_op, I_op, mu, reg_filter, pos = 1)
S[S<0] = 0
FS = F_op(S)*mask
else:
alpha, csi, ts = tools.FISTA(Y, alpha, F_op, I_op, mu, ts, csi, reg1, transform, inverse, mask = mask)
S = inverse(alpha)
FS = F_op(S)
#Convergence condition
Res1.append((np.std(Y-FS)**2)/np.median(sigma0)**2)
Res2.append(ks)
# ks = ks-steps
i = i+1
S[S<0] = 0
# alpha = transform(S)
weightS = 1./(1.+np.exp(-10.*(levels*kmax-alpha)))
# plt.show()
#Final reconstruction of the source
plt.plot(Res1, 'b'); plt.show()
plt.plot(Res2, 'r');
plt.show()
if noise == 'poisson':
plt.subplot(211)
plt.title('S')
plt.imshow(S); plt.colorbar()
plt.show()
FS = F_op(S)*mask
return S, FS
#############################SLIT MCA for blended lenses############################
def SLIT_MCA(Y, Fkappa, kmax, niter, riter, size,PSF, PSFconj, lvlg = 0, lvls = 0, levels = [0], mask = [0,0], Ginit=0):
    ##DESCRIPTION:
    ##    Estimates the source light profile S and the lens galaxy light profile G
    ##    from an image of a strong lens system, via a morphological component
    ##    analysis (MCA) that alternates FISTA iterations on S and on G.
    ##
    ##INPUTS:
    ##  -Y: a 2-D image of a lensed source given as an n1xn2 numpy array.
    ##  -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
    ##          using tools from SLIT.Lens
    ##  -kmax: the detection threshold in units of noise levels. We usually set this value to 5 to get a 5-sigma
    ##          detection threshold.
    ##  -niter: maximal number of iterations in the main loop over G.
    ##  -riter: maximal number of iterations in the inner loop over S.
    ##  -size: resolution factor between lens and source grids such that the output source
    ##          will be (n1*size)x(n2*size)
    ##  -PSF: the point spread function of the observation provided as a 2D array.
    ##  -PSFconj: the conjugate of the PSF. Usually computed via np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
    ##          but the user has to make sure that the conjugate is well centered.
    ##
    ##OPTIONS:
    ##  -lvlg, lvls: number of starlet scales in image/source plane (0 = automatic).
    ##  -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
    ##          If not provided, the routine will compute the levels and save them in a fits file 'Noise_levels_MCA.fits'
    ##          so that they can be reused later to save time on repeated runs.
    ##  -mask: an array of zeros and ones with size n1xn2. The zeros stand for masked data.
    ##  -Ginit: educated guess for the lens galaxy light profile.
    ##          NOTE(review): Ginit is documented but never used in the body; G is
    ##          always initialised to zeros below -- confirm whether this is intended.
    ##
    ##OUTPUTS:
    ##  -S: the source light profile.
    ##  -FS: the lensed version of the estimated source light profile.
    ##  -G: the convolved lens light profile.
    ##
    ##EXAMPLE:
    ##  S, FS, G = SLIT_MCA(img, Fkappa, 5, 100, 10, 1, PSF, PSFconj)
    #Shape of the image
    n1,n2 = np.shape(Y)
    #Initialisation of the source
    ns1= n1*size
    ns2 = n2*size
    #Number of starlet scales in source and image planes
    if lvlg ==0:
        lvlg = np.int(np.log2(n2))
    else:
        lvlg = np.min([lvlg,np.int(np.log2(n2))])
    lvls = lvlg
    if lvls >np.int(np.log2(ns2)):
        print('Error, too many wavelet levels for the source. Choose a smaller value for lvl')
        # NOTE(review): bare 'exit' is a no-op expression, not a call -- execution
        # falls through after the error message; a 'return' or 'raise' was
        # probably intended. Confirm before changing.
        exit
    #Masking if required
    if np.sum(mask) == 0:
        mask = np.ones((n1,n2))
    Y = Y*mask
    #Noise standard deviation in image plane
    sigma0 = MAD(Y)
    #Mapping of an all-at-one image
    lensed = lens_one(Fkappa, n1,n2, size)
    # Support of the lensed region in source plane (1 on hit pixels, 0 elsewhere).
    # NOTE(review): lensed/lensed performs 0/0 where lensed==0 (NaN + warning);
    # the ==1 comparison then filters them out, so the result is still a mask.
    supp = np.zeros((lvls,lensed.shape[0],lensed.shape[1]))
    supp[:,lensed/lensed ==1] =1
    #Limits of the image plane in source plane
    bound = mk_bound(Fkappa, n1,n2, size)
    #Noise levels in image plane in starlet space
    levelg = level(n1,n2, lvlg)*sigma0
    #Useful functions
    def Finv_apply(I):
        # Lensing operator: image plane -> source plane
        return Lens.image_to_source(I, size, Fkappa, lensed = lensed)
    def F_apply(Si):
        # Lensing operator: source plane -> image plane
        return Lens.source_to_image(Si, n1, n2,Fkappa)
    def PSF_apply(i):
        return scp.fftconvolve(i,PSF,mode = 'same')
    def PSFT_apply(ii):
        return scp.fftconvolve(ii,PSFconj,mode = 'same')
    def transform(x):
        return tools.wave_transform(x, lvlg)
    def inverse(x):
        return tools.iuwt(x)
    #Forward Source operator
    def FS_op(X):
        return PSF_apply(F_apply(X))
    #Inverse Source operator
    def IS_op(X):
        return Finv_apply(PSFT_apply(X))
    #Forward Lens operator (identity: G lives directly in image plane)
    def FG_op(X):
        return ((X))
    #Inverse Lens operator (identity)
    def IG_op(X):
        return ((X))
    #Regularisation (Backward term)
    # NOTE(review): tools.Hard_Threshold takes (X, transform, inverse, level, k, supp=1);
    # the 'k' argument appears to be missing in regG0/regS0, and kG/kS are not
    # defined in this scope -- confirm against tools.py.
    def regG0(X):
        return tools.Hard_Threshold(X, transform, inverse, levelg*kG)
    def regS0(X):
        return tools.Hard_Threshold(X, transform, inverse, levels*kS)
    # NOTE(review): tools.Soft takes (X, transform, inverse, level, k, supp=...);
    # the positional 'k' argument appears to be missing in regS1/regG1 -- confirm.
    def regS1(X):
        return tools.Soft(X, transform, inverse, levels*kmax*weightS, supp = supp)
    def regG1(X):
        return tools.Soft(X, transform, inverse, levelg*(kmax)*weightG, supp = 1)
    # NOTE(review): tools.mr_filter's signature is (Y, level, k, niter, transform,
    # inverse, ...); here 20 lands on 'k' and no niter is supplied -- confirm.
    def reg_filter(X):
        return tools.mr_filter(X, levelg*kG*sigma0, 20, transform, inverse, Soft = 0, pos = 1)
    #Noise simulations to estimate noise levels in source plane
    if np.sum(levels)==0:
        print('Calculating noise levels')
        levels = simulate_noise(n1,n2, sigma0, size, IS_op, transform, lvls)
        # Pixels never reached by the lens mapping get an (effectively infinite) level
        levels[:,lensed ==0] = np.max(levels*10)
        #Saves levels
        hdus = pf.PrimaryHDU(levels)
        lists = pf.HDUList([hdus])
        lists.writeto('Noise_levels_MCA.fits', clobber=True)
    #Computation of spectral norms, used to set the FISTA step sizes
    FS_norm = spectralNorm(ns1,ns2,20,1e-10,FS_op,IS_op)
    Star_norm_im = spectralNorm(n1,n2,20,1e-10,transform,inverse)
    Star_norm_s = spectralNorm(ns1,ns2,20,1e-10,transform,inverse)
    muG = 1./(Star_norm_im**2)
    muS = 1./(Star_norm_s*FS_norm)**2
    print(muS, muG)
    weightS = 1
    weightG = 1
    #Reweighting loop (weights updated from the current coefficients each pass)
    for it in range(3):
        #Initialisations
        FS = 0
        G = np.zeros((n1,n2))
        S = np.zeros((ns1,ns2))
        alphaS = transform(S)
        csiS = np.copy(alphaS)
        alphaG = transform(G)
        csiG = np.copy(alphaG)
        i = 0
        K_s = np.zeros(niter)
        tg=1
        #Beginning of main loop
        while i < niter:
            print('main loop: ',i)
            # Update S against the G-subtracted data
            DS = Y-G
            ts = 1
            for j in range(riter):
#            S = tools.Forward_Backward(DS, S, FS_op, IS_op, muS, regS0)
                alphaS, csiS, ts = tools.FISTA(DS, alphaS, FS_op, IS_op, muS, ts, csiS, regS1, transform, inverse, pos = 1)
            S = inverse(alphaS)#*supp
            S[S<0] = 0
            FS = FS_op(S)
            # Update G against the source-subtracted data
            DG = Y-FS
            for j2 in range(riter):
                alphaG, csiG, tg = tools.FISTA(DG, alphaG, FG_op, IG_op, muG, tg, csiG, regG1, transform, inverse, pos = 1)
            #Image to approximate by solving the problem in G
            G = inverse(alphaG)
            #
            # Track the normalised residual for diagnostics
            newres = (np.std(Y-FS-G)**2)/sigma0**2
            K_s[i] = newres
            res = np.copy(newres)
            # Diagnostic plots saved at every iteration (side effect: writes Res<i>.png)
            plt.figure(0)
            plt.subplot(221)
            plt.title('S')
            plt.imshow(S)
            plt.subplot(222)
            plt.title('FS')
            plt.imshow(FS)
            plt.subplot(223)
            plt.title('G')
            plt.imshow(G)
            plt.subplot(224)
            plt.title('Residuals')
            plt.imshow(Y-FS-G)
            plt.savefig('Res'+str(i)+'.png')
            i +=1
            #Weighting (reweighted-l1 style weights from the current coefficients)
            weightS = 1./(1.+np.exp(-10.*(levels*kmax*sigma0-alphaS)))
            weightG = 1./(1.+np.exp(-10.*(levelg*kmax*sigma0-alphaG)))
        # Refine the source with a plain SLIT run on the G-subtracted image
        S, FS = SLIT(Y-G, Fkappa, kmax, niter, size, PSF, PSFconj, S0 = S, levels = levels, mask = mask, lvl = lvls)
    #Final reconstructions
    plt.show()
    plt.plot(K_s); plt.show()
    return S, FS,G
################################### TOOLS ###################################
def MOM(Y, levels, levelg, kmax, transform, inverse, IS_op, sigma, niter, I = 0):
    ##DESCRIPTION:
    ##    Estimates the starting threshold k as the smaller of the Maximums Of
    ##    the normalised wavelet coefficients of Y (lens side) and of IS_op(Y)
    ##    (source side), and the linear step that brings it down to kmax.
    ##
    ##INPUTS:
    ##    -Y: image-plane data.
    ##    -levels, levelg: noise levels in source/image wavelet space (coarse scale excluded).
    ##    -kmax: final threshold in units of noise.
    ##    -transform: wavelet transform operator.
    ##    -inverse: kept for interface compatibility (unused).
    ##    -IS_op: inverse (image -> source) operator.
    ##    -sigma: noise standard deviation.
    ##    -niter, I: used to size the threshold decrease schedule.
    ##
    ##OUTPUTS:
    ##    -k: starting threshold; -step: per-iteration threshold decrement.
    Gw0 = transform(Y)[:-1,:,:]
    levelg1 = levelg[:-1,:,:]
    # Bug fix: the mask is built from the SLICED level array, so it must also
    # index the sliced array; indexing the full array with it raises an
    # IndexError (boolean index shape mismatch) in NumPy.
    Gw = Gw0[levelg1 != 0] / levelg1[levelg1 != 0] / sigma
    kG = np.max(Gw)
    Sw0 = transform(IS_op(Y))[:-1,:,:]
    levels1 = levels[:-1,:,:]
    Sw = Sw0[levels1 != 0] / levels1[levels1 != 0] / sigma
    kS = np.max(Sw)
    # Start from the smaller of the two maxima (but never below kmax), with a
    # small margin proportional to their gap.
    k = np.min([kG, kS])
    k = np.max([k, kmax]) + (np.abs(kS - kG)) / 100.
    step = (k - kmax) / (niter - I - 5)
    return k, step
def plot_cube(cube):
    ##DESCRIPTION:
    ##    Plotting device that displays layers of a cube in different subplot panels.
    ##
    ##INPUTS:
    ##    -cube: cube with shape (n,n1,n2): n layers of n1xn2 pixels.
    ##
    ##OUTPUTS:
    ##    -None
    n,n1,n2 = np.shape(cube)
    # Bug fix: rows must be ceil(n/2) as an integer (n/2 is a float under
    # Python 3 and the old "+0.5" comparison could never be true), and
    # matplotlib subplot indices are 1-based (index 0 raises).
    rows = (n + 1) // 2
    cols = 2
    for k in range(n):
        plt.subplot(rows, cols, k + 1)
        plt.imshow(cube[k,:,:]); plt.colorbar()
    return None
def level(n1,n2, lvl):
    ##DESCRIPTION:
    ##    Estimates the noise levels in starlet space in image plane.
    ##
    ##INPUTS:
    ##    -n1,n2: shape of the image for which to get noise levels.
    ##    -lvl: number of starlet scales.
    ##
    ##OUTPUTS:
    ##    -levels: noise amplification factor at each scale and pixel of a starlet transform.
    dirac = np.zeros((n1,n2))
    # Bug fix: integer division for the central pixel index (n1/2 is a float
    # under Python 3 and is not a valid array index).
    dirac[n1//2,n2//2] = 1
    wave_dirac = mw.wave_transform(dirac,lvl, newwave = 0)
    # L2 norm of each wavelet band of a dirac = factor by which unit white
    # noise is amplified at that scale.
    wave_sum = np.sqrt(np.sum(np.sum(wave_dirac**2,1),1))
    levels = np.multiply(np.ones((lvl,n1,n2)).T,wave_sum).T
    return levels
def level_source(n1,n2,sigma,size,PSFT, Lens_op2, lensed, lvl):
    ##DESCRIPTION:
    ##    Propagates a flat noise map of standard deviation sigma through the
    ##    (transposed) PSF and the lensing operator, then estimates the noise
    ##    level at each starlet scale and pixel of the source plane.
    ##    NOTE: mutates 'lensed' in place (zeros are set to 1) -- kept for
    ##    backward compatibility; confirm callers do not rely on the zeros.
    ns1,ns2 = n1*size, n2*size
    ones = np.ones((n1,n2))
    lensed[lensed == 0] = 1
    noise = ones*sigma
    # PSF^T applied to white noise scales its std by the filter's L2 norm
    Hnoise = noise*np.sqrt(np.sum(PSFT**2))#np.sqrt(scp.fftconvolve(noise**2, PSFT**2, mode = 'same'))##
    FHnoise = Lens_op2(Hnoise)
    # Avoid zero levels on pixels the mapping never reaches
    FHnoise[FHnoise==0] = np.mean(FHnoise)*10.
    dirac = np.zeros((ns1,ns2))
    # Bug fix: integer division for the central pixel index (Python 3)
    dirac[ns1//2,ns2//2] = 1
    wave_dirac = mw.wave_transform(dirac, lvl)
    levels = np.zeros(wave_dirac.shape)
    for i in range(lvl):
        if np.size(noise.shape) > 2:
            lvlso = (scp.fftconvolve(FHnoise[i, :, :] ** 2, wave_dirac[i, :, :] ** 2,
                                     mode='same'))
        else:
            lvlso = scp.fftconvolve(FHnoise ** 2, wave_dirac[i,:,:] ** 2,
                                    mode='same')
        levels[i, :, :] = np.sqrt(np.abs(lvlso))
    return levels
def spectralNorm(nx,ny,Niter,tol,f,finv):
    ##DESCRIPTION:
    ##    Estimates the spectral norm of the composed operator finv(f(.)) by
    ##    power iteration. (The previous header was copy-pasted from another
    ##    routine and wrongly described source estimation.)
    ##
    ##INPUTS:
    ##    -nx,ny: shape of the random start input.
    ##    -Niter: maximum number of power iterations.
    ##    -tol: relative tolerance used as stopping criterion.
    ##    -f: operator.
    ##    -finv: adjoint/inverse operator.
    ##
    ##OUTPUTS:
    ##    -spNorm: the estimated spectral norm of the operator.
    #### Initialize array with random numbers ###
    matA = np.random.randn(nx,ny)
    ### Normalize the input ###
    # Use np.linalg directly, consistent with the rest of the module (the 'LA'
    # alias is not imported here).
    spNorm = np.linalg.norm(matA)
    matA /= spNorm
    matA = np.array(matA)
    it = 0
    err = abs(tol)
    while it < Niter and err >= tol:
        ### Apply operator ###
        wt = f(matA)
        ### Apply joint operator ###
        matA = finv(wt)
        ### Compute norm: its growth converges to the spectral norm ###
        spNorm_new = np.linalg.norm(matA)
        matA /= spNorm_new
        err = abs(spNorm_new - spNorm)/spNorm_new
        spNorm = spNorm_new
        it += 1
    return spNorm
def lens_one(Fkappa, n1,n2,size):
    ##DESCRIPTION:
    ##    Maps an all-ones (n1, n2) image to source plane, giving the footprint
    ##    (with multiplicities) of the image plane on the source grid.
    ##
    ##INPUTS:
    ##    -Fkappa: the mapping between source and image planes.
    ##    -n1,n2: the shape of the image.
    ##    -size: scale factor of the source grid relative to the image grid.
    ##
    ##OUTPUTS:
    ##    -lensed: the projection to source plane of an all-ones image.
    ones_image = np.ones((n1, n2))
    return Lens.image_to_source(ones_image, size, Fkappa, lensed = [0])
def mk_bound(Fkappa, n1,n2,size):
    ##DESCRIPTION:
    ##    Returns the support of the lens image in source plane as a binary map
    ##    (1 where the image plane projects, 0 elsewhere).
    ##
    ##INPUTS:
    ##    -Fkappa: the mapping between source and image planes.
    ##    -n1,n2: the shape of the image.
    ##    -size: scale factor of the source grid relative to the image grid.
    ##
    ##OUTPUTS:
    ##    -bound: binary support of the projected image plane.
    dirac = np.ones((n1,n2))
    lensed = Lens.image_to_source_bound(dirac, size,Fkappa,lensed = [0])
    # Bug fix: 'lensed/lensed' performed 0/0 divisions (NaN + runtime warnings)
    # before the zeros were patched; build the binary support directly instead.
    bound = np.where(lensed != 0, 1., 0.)
    return bound
def MAD(x,n=3):
    ##DESCRIPTION:
    ##    Estimates the noise standard deviation from the Median Absolute
    ##    Deviation of the finest wavelet scale of x.
    ##
    ##INPUTS:
    ##    -x: a 2D image for which we look for the noise levels.
    ##
    ##OPTIONS:
    ##    -n: size of the median filter. Default is 3.
    ##
    ##OUTPUTS:
    ##    -sigma: the estimated noise standard deviation (the previous header
    ##            wrongly documented S/FS outputs).
    # Bug fix: np.int was removed from NumPy (>=1.24); the builtin int is
    # identical here. The unused 'sh' local was dropped.
    x = mw.wave_transform(x, int(np.log2(x.shape[0])))[0,:,:]
    meda = med.median_filter(x,size = (n,n))
    medfil = np.abs(x-meda)
    # 1.48 converts a median absolute deviation into a Gaussian sigma
    sigma = 1.48*np.median((medfil))
    return sigma
def MAD_poisson(x,tau,n=3):
    ##DESCRIPTION:
    ##    Estimates a per-pixel noise standard deviation map under Poisson
    ##    noise with exposure time tau, from a denoised version of x.
    ##
    ##INPUTS:
    ##    -x: a 2D image for which we look for the noise levels.
    ##    -tau: exposure time.
    ##
    ##OPTIONS:
    ##    -n: size of the median filter. Default is 3 (currently unused).
    ##
    ##OUTPUTS:
    ##    -sigma: per-pixel noise standard deviation map (the previous header
    ##            wrongly documented S/FS outputs).
    # Bug fix: np.int was removed from NumPy (>=1.24); the unused n1,n2 locals
    # were dropped.
    lvl = int(np.log2(x.shape[0]))-1
    new_x, y = wine.MCA.mr_filter(x,20,8,MAD(x), lvl = lvl)
    # NOTE(review): interactive display inside a library routine blocks
    # execution; kept for backward compatibility -- consider removing.
    plt.imshow(new_x); plt.show()
    # Poisson variance is proportional to the (denoised) signal over tau
    sigma = np.sqrt(np.abs(new_x)/tau)
    return sigma
def ST(alpha, k, levels, sigma, hard = 0):
    ##DESCRIPTION:
    ##    Soft (default) or hard thresholding operator for starlet coefficients.
    ##
    ##INPUTS:
    ##    -alpha: the starlet decomposition to be thresholded, shape (lvl,n1,n2).
    ##    -k: the threshold in units of noise levels (usually 5).
    ##    -levels: the noise levels at each scale and location of the decomposition.
    ##    -sigma: the noise standard deviation.
    ##
    ##OPTIONS:
    ##    -hard: if non-zero, apply hard thresholding instead of soft.
    ##
    ##OUTPUTS:
    ##    -alpha: the thresholded coefficients (the input array is NOT modified).
    lvl, n1,n2 = np.shape(alpha)
    th = np.ones((lvl,n1,n2))*k
    th[0,:,:] = th[0,:,:]+3   # stricter threshold on the finest scale
    th[-1,:,:] = 0            # coarse scale is never thresholded
    th = th*levels*sigma
    alpha0 = np.copy(alpha)
    if hard == 0:
        alpha = np.sign(alpha0)*(np.abs(alpha0)-th)
    else:
        # Bug fix: work on a copy so the caller's array is not mutated in place.
        alpha = np.copy(alpha0)
    # Bug fix: the significance mask must be computed on the ORIGINAL
    # coefficients; testing the already-shrunk values effectively doubled the
    # soft threshold.
    alpha[np.abs(alpha0) - th < 0] = 0
    return alpha
def mk_simu(n1,n2,lvl,size, sigma, I_op, transform, n):
storage = np.zeros((lvl,n1*size, n2*size, n))
for i in range(n):
noise = np.random.randn(n1,n2)*sigma
noise_lens = I_op(noise)
noise_lens[noise_lens ==0] = 1
storage[:,:,:,i] = transform(noise_lens)
return storage
def simulate_noise(n1,n2, sigma, size, I_op, transform, lvl, Npar = mtp.cpu_count()//2):
    ##DESCRIPTION:
    ##    Simulates noise levels in source plane by Monte Carlo: propagates 500
    ##    white-noise realisations through I_op and the wavelet transform and
    ##    takes the per-coefficient standard deviation.
    ##
    ##INPUTS:
    ##    -n1,n2: shape of the image-plane noise maps.
    ##    -sigma: image-plane noise standard deviation.
    ##    -size: scaling factor for the shape of the source.
    ##    -I_op: inverse (image -> source) operator.
    ##    -transform: wavelet transform operator.
    ##    -lvl: number of wavelet scales.
    ##
    ##OPTIONS:
    ##    -Npar: kept for interface compatibility; no longer used (see below).
    ##
    ##OUTPUTS:
    ##    -w_levels: per-scale, per-pixel noise standard deviations in source plane.
    n = 500
    if Npar>mtp.cpu_count():
        Npar = mtp.cpu_count()
    ns1,ns2 = n1*size, n2*size
    # Bug fixes: the default of Npar used np.int (removed in NumPy >=1.24), and
    # a multiprocessing Pool was created here but never used nor closed,
    # leaking worker processes -- the simulation below runs serially anyway.
    storage = mk_simu(n1,n2,lvl,size, sigma, I_op, transform,n)
    w_levels = np.std(storage, axis = 3)
    return w_levels
| 22,735 | 31.713669 | 131 | py |
SLIT | SLIT-master/build/lib/SLIT/wave_transform.py | import numpy as np
import scipy.signal as cp
import matplotlib.pyplot as plt
import scipy.ndimage.filters as sc
def symmetrise(img, size):
    """Fill the padding border of img (shape (n3, n4)) with the mirror image of
    the central (n1, n2) region given by size, in place, and return img.

    Bug fix: the border offsets were Python-3 float divisions; they now use
    integer division so the slices remain valid indices.
    """
    n3, n4 = np.shape(img)
    n1, n2 = size
    top = (n3 - n1) // 2
    left = (n4 - n2) // 2
    img[:top, :] = np.flipud(img[top:(n3 - n1), :])
    img[:, :left] = np.fliplr(img[:, left:(n4 - n2)])
    img[(n3 + n1) // 2:, :] = np.flipud(img[n1:(n3 + n1) // 2, :])
    img[:, (n4 + n2) // 2:] = np.fliplr(img[:, n2:(n4 + n2) // 2])
    return img
def fft_convolve(X,Y, inv = 0):
    """Circular convolution (or correlation when inv=1) of X and Y via real
    FFTs, rolled so that the result is centred on the grid.

    Bug fix: the np.roll shifts were float expressions under Python 3; they
    are now floor divisions with the same value as the original Python-2
    arithmetic.
    """
    XF = np.fft.rfft2(X)
    YF = np.fft.rfft2(Y)
    if inv == 1:
        # conjugating one spectrum turns the convolution into a correlation
        YF = np.conj(YF)
    SF = XF*YF
    S = np.fft.irfft2(SF)
    n1,n2 = np.shape(S)
    S = np.roll(S, (-n1)//2 + 1, axis = 0)
    S = np.roll(S, (-n2)//2 + 1, axis = 1)
    return np.real(S)
def wave_transform(img, lvl, Filter = 'Bspline', newwave = 1, convol2d = 0):
    """Isotropic undecimated (starlet) wavelet transform of img.

    img: 2D array (a 3D cube is transformed slice by slice along its smallest
    axis); lvl: requested number of scales (reduced automatically when the
    dilated filter would exceed the image); Filter: 'Bspline' (5-tap) or any
    other value for the 3-tap filter; newwave: 1 for the second-generation
    starlet (detail = c_j - h*h*c_{j+1}), 0 for the classic one
    (detail = c_j - c_{j+1}); convol2d: 1 for 2D convolution, 0 (default) for
    separable 1D convolutions.

    Returns an array of shape (lvl', n1, n2) whose last slice is the coarse
    scale. Bug fixes: loop/array indices came from np.linspace as floats
    (an error under modern NumPy) and the row dimension was read from sh[1]
    (crashing on non-square images); integer range() loops and n1 = sh[0]
    are used instead.
    """
    mode = 'nearest'
    lvl = lvl-1
    sh = np.shape(img)
    if np.size(sh) == 3:
        # transform each slice of the cube along its smallest axis
        mn = np.min(sh)
        wave = np.zeros([lvl+1, sh[1], sh[1], mn])
        for h in range(mn):
            if mn == sh[0]:
                wave[:,:,:,h] = wave_transform(img[h,:,:], lvl+1, Filter = Filter)
            else:
                wave[:,:,:,h] = wave_transform(img[:,:,h], lvl+1, Filter = Filter)
        return wave

    n1 = sh[0]
    n2 = sh[1]

    if Filter == 'Bspline':
        h = [1./16, 1./4, 3./8, 1./4, 1./16]
    else:
        h = [1./4, 1./2, 1./4]
    n = np.size(h)
    h = np.array(h)

    # reduce the number of scales if the dilated filter would exceed the image
    if n+2**(lvl-1)*(n-1) >= np.min([n1,n2])/2.:
        lvl = np.int_(np.log2((n1-1)/(n-1.))+1)

    c = img
    ## wavelet set of coefficients.
    wave = np.zeros([lvl+1, n1, n2])

    for i in range(lvl):
        # "a trous" filter: h dilated by inserting 2**i - 1 zeros between taps
        newh = np.zeros((1, n+(n-1)*(2**i-1)))
        newh[0, np.int_(np.linspace(0, np.size(newh)-1, len(h)))] = h
        H = np.dot(newh.T, newh)

        ###### compute c(j+1): line then column convolution
        if convol2d == 1:
            cnew = cp.convolve2d(c, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(c, newh[0,:], axis = 0, mode = mode)
        # NOTE(review): this column convolution runs in BOTH branches, so the
        # convol2d path filters the columns twice; iuwt mirrors the same
        # structure, which keeps the transform/reconstruction pair consistent.
        cnew = sc.convolve1d(cnew, newh[0,:], axis = 1, mode = mode)

        if newwave == 1:
            ###### second generation: detail = c_j - h*(h*c_{j+1})
            if convol2d == 1:
                hc = cp.convolve2d(cnew, H, mode='same', boundary='symm')
            else:
                hc = sc.convolve1d(cnew, newh[0,:], axis = 0, mode = mode)
            hc = sc.convolve1d(hc, newh[0,:], axis = 1, mode = mode)
            wave[i,:,:] = c-hc
        else:
            ###### first generation: detail = c_j - c_{j+1}
            wave[i,:,:] = c-cnew
        c = cnew

    # coarse scale
    wave[lvl,:,:] = c
    return wave
def iuwt(wave, convol2d = 0):
    """Inverse of the second-generation starlet transform: starting from the
    coarse scale, repeatedly smooth with the dilated filter and add the next
    detail scale.

    Bug fix: the reconstruction loop iterated over np.linspace floats, which
    are invalid array indices under modern NumPy; an integer range() is used
    instead.
    """
    mode = 'nearest'
    lvl, n1, n2 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
    n = np.size(h)
    cJ = np.copy(wave[lvl-1,:,:])
    for i in range(1, lvl):
        # filter dilation matching scale lvl-1-i of the forward transform
        newh = np.zeros((1, n+(n-1)*(2**(lvl-1-i)-1)))
        newh[0, np.int_(np.linspace(0, np.size(newh)-1, len(h)))] = h
        H = np.dot(newh.T, newh)
        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(cJ, newh[0,:], axis = 0, mode = mode)
        ###### Column convolution (runs in both branches, mirroring wave_transform)
        cnew = sc.convolve1d(cnew, newh[0,:], axis = 1, mode = mode)
        cJ = cnew + wave[lvl-1-i,:,:]
    return np.reshape(cJ, (n1, n2))
| 3,715 | 25.169014 | 81 | py |
SLIT | SLIT-master/build/lib/SLIT/tools.py | import numpy as np
import matplotlib.pyplot as plt
import pyfits as pf
import scipy.ndimage.filters as sc
import scipy.ndimage.filters as med
import scipy.signal as cp
def MAD(x,n=3):
    ##DESCRIPTION:
    ##    Estimates the noise standard deviation from the Median Absolute
    ##    Deviation of the finest wavelet scale of x.
    ##
    ##INPUTS:
    ##    -x: a 2D image for which we look for the noise levels.
    ##
    ##OPTIONS:
    ##    -n: size of the median filter. Default is 3.
    ##
    ##OUTPUTS:
    ##    -sigma: the estimated noise standard deviation (the previous header
    ##            wrongly documented S/FS outputs).
    # Bug fix: np.int was removed from NumPy (>=1.24); the builtin int is
    # identical here. The unused 'sh' local was dropped.
    x = wave_transform(x, int(np.log2(x.shape[0])))[0,:,:]
    meda = med.median_filter(x,size = (n,n))
    medfil = np.abs(x-meda)
    # 1.48 converts a median absolute deviation into a Gaussian sigma
    sigma = 1.48*np.median((medfil))
    return sigma
def Forward_Backward(Y, X, F_op, I_op, mu, reg, pos = 1):
    """One forward-backward step: gradient step on the data fidelity term,
    then the regularisation operator reg (which returns the new estimate and
    a mask), optionally followed by a projection onto the positive orthant."""
    gradient = mu * I_op(Y - F_op(X))
    Xnew, M = reg(X + gradient)
    if pos == 1:
        Xnew[Xnew < 0] = 0
    return Xnew, M
def Primal_dual(Y, X, U, mu, nu, tau, F_op, I_op, transform, inverse, reg):
    """One relaxed primal-dual iteration (Chambolle-Pock style): primal
    gradient step minus the dual contribution, dual step through reg at the
    extrapolated primal point, then relaxation with parameter tau."""
    primal = X + mu * I_op(Y - F_op(X)) - mu * inverse(U)
    dual = reg(U + nu * transform(2 * primal - X))
    X_next = tau * primal + (1 - tau) * X
    U_next = tau * dual + (1 - tau) * U
    return X_next, U_next
def FISTA(Y, alphaX, F_op, I_op, mu, ts, csi, reg, transform, inverse, pos = 1, mask = 1):
    """One FISTA iteration in transformed (wavelet) space: gradient step on
    the data term, thresholding through reg, then the Nesterov momentum
    update. Returns (new coefficients, new extrapolation point, new inertial
    parameter). 'pos' is accepted for interface compatibility but unused."""
    current = inverse(alphaX)
    residual_grad = mu * I_op(Y - F_op(current) * mask)
    candidate = transform(residual_grad) + csi
    candidate, M = reg(candidate)
    ts_next = (1. + np.sqrt(1. + 4. * ts ** 2)) / 2.
    momentum = (ts - 1) / ts_next
    csi_next = candidate + momentum * (candidate - alphaX)
    return candidate, csi_next, ts_next
def Soft(X, transform, inverse, level, k, supp =1):
    """Soft-threshold the coefficient cube X at level*k, leaving the coarse
    (last) scale untouched and restricting the result to the support supp.
    'transform' and 'inverse' are accepted for interface compatibility only."""
    threshold = level * k
    shrunk = np.sign(X) * (np.abs(X) - threshold)
    shrunk[np.abs(X) < threshold] = 0
    shrunk[-1,:,:] = X[-1,:,:]
    return shrunk * supp
def Soft_Threshold(X, transform, inverse, level, k, supp =1):
    """Soft-threshold X in transform space; returns (inverse(thresholded), mask).

    The significance mask is computed on the first-generation starlet of X,
    with a stricter threshold (k+1) on the finest scale; the coarse scale is
    always preserved. Bug fix: the original rebound transform(X) to 'X' but
    then read the undefined name 'Xw' (NameError); the transformed array is
    now consistently named Xw, mirroring Hard_Threshold.
    """
    Xw = transform(X)
    alpha = wave_transform(X, Xw.shape[0], newwave = 0)
    M = np.zeros(alpha.shape)
    M[np.abs(alpha)-level*k>0] = 1
    # finest scale: stricter threshold (k+1)
    M[0,:,:] = 0
    M[0,np.abs(alpha[0,:,:]) - level[0,:,:] * (k+1) > 0] = 1
    Xnew = np.sign(Xw)*(np.abs(Xw)-level*k)
    Xnew = Xnew*M
    Xnew[-1,:,:] = Xw[-1,:,:]
    Xnew = Xnew*supp
    return inverse(Xnew), M
def Hard(X, transform, inverse, level, k, supp=1):
    """Hard-threshold the coefficient cube X at level*k, keeping the coarse
    (last) scale, and return (thresholded cube, keep-mask).

    'transform' and 'inverse' are accepted for interface compatibility only.
    Bug fix: the original returned the undefined name 'M' (NameError); the
    binary mask of retained coefficients is now built explicitly.
    """
    M = np.zeros(np.shape(X))
    M[np.abs(X) - level*k >= 0] = 1
    M[-1,:,:] = 1  # the coarse scale is always kept
    Xnew = X*M
    Xnew = Xnew*supp
    return Xnew, M
def Hard_Threshold(X, transform, inverse, level, k, supp=1):
    """Hard-threshold X in transform space; returns (inverse(thresholded), mask).
    The significance mask is computed on the first-generation starlet of X,
    with a stricter threshold (k+1) on the finest scale; the coarse scale is
    always preserved."""
    Xw = transform(X)
    coeffs = wave_transform(X, Xw.shape[0], newwave = 0)
    keep = np.zeros(coeffs.shape)
    keep[np.abs(coeffs) > level * k] = 1
    # the finest scale uses the stricter (k+1) threshold
    keep[0,:,:] = 0
    keep[0, np.abs(coeffs[0,:,:]) > level[0,:,:] * (k + 1)] = 1
    thresholded = keep * Xw
    thresholded[-1,:,:] = Xw[-1,:,:]
    thresholded = thresholded * supp
    return inverse(thresholded), keep
def mr_filter(Y, level, k, niter, transform, inverse, lvl = 6, Soft = 0, pos = 1):
    """Iterative multi-resolution filtering of Y: repeatedly threshold the
    current residual (soft or hard) and accumulate the result, optionally
    enforcing positivity. Returns (filtered image, significance mask of Y).

    Bug fixes: 'Soft_threshold' was a misspelling of Soft_Threshold
    (NameError when Soft is true), and both thresholding routines return an
    (image, mask) pair that was previously assigned to a single name, making
    the 'Xnew + Rnew' accumulation a type error.
    """
    Xnew = 0
    alpha = wave_transform(Y, lvl, newwave=0)
    M = np.zeros(alpha.shape)
    M[np.abs(alpha)-level*k>0] = 1
    # finest scale uses the stricter (k+1) threshold; coarse scale always kept
    M[0,:,:] = 0
    M[0,np.abs(alpha[0,:,:]) - level[0,:,:] * (k+1) > 0] = 1
    M[-1,:,:] = 1
    i = 0
    while i < niter:
        R = Y-Xnew
        if Soft == True:
            Rnew, _ = Soft_Threshold(R, transform, inverse, level, k)
        else:
            Rnew, _ = Hard_Threshold(R, transform, inverse, level, k)
        Xnew = Xnew+Rnew
        if pos == True:
            Xnew[Xnew < 0] = 0
        i = i+1
    return (Xnew), M
def wave_transform(img, lvl, Filter = 'Bspline', newwave = 1, convol2d = 0):
    """Isotropic undecimated (starlet) wavelet transform of img.

    img: 2D array (a 3D cube is transformed slice by slice along its smallest
    axis); lvl: requested number of scales (reduced automatically when the
    dilated filter would exceed the image); Filter: 'Bspline' (5-tap) or any
    other value for the 3-tap filter; newwave: 1 for the second-generation
    starlet (detail = c_j - h*h*c_{j+1}), 0 for the classic one
    (detail = c_j - c_{j+1}); convol2d: 1 for 2D convolution, 0 (default) for
    separable 1D convolutions.

    Returns an array of shape (lvl', n1, n2) whose last slice is the coarse
    scale. Bug fixes: loop/array indices came from np.linspace as floats
    (an error under modern NumPy) and the row dimension was read from sh[1]
    (crashing on non-square images); integer range() loops and n1 = sh[0]
    are used instead.
    """
    mode = 'nearest'
    lvl = lvl-1
    sh = np.shape(img)
    if np.size(sh) == 3:
        # transform each slice of the cube along its smallest axis
        mn = np.min(sh)
        wave = np.zeros([lvl+1, sh[1], sh[1], mn])
        for h in range(mn):
            if mn == sh[0]:
                wave[:,:,:,h] = wave_transform(img[h,:,:], lvl+1, Filter = Filter)
            else:
                wave[:,:,:,h] = wave_transform(img[:,:,h], lvl+1, Filter = Filter)
        return wave

    n1 = sh[0]
    n2 = sh[1]

    if Filter == 'Bspline':
        h = [1./16, 1./4, 3./8, 1./4, 1./16]
    else:
        h = [1./4, 1./2, 1./4]
    n = np.size(h)
    h = np.array(h)

    # reduce the number of scales if the dilated filter would exceed the image
    if n+2**(lvl-1)*(n-1) >= np.min([n1,n2])/2.:
        lvl = np.int_(np.log2((n1-1)/(n-1.))+1)

    c = img
    ## wavelet set of coefficients.
    wave = np.zeros([lvl+1, n1, n2])

    for i in range(lvl):
        # "a trous" filter: h dilated by inserting 2**i - 1 zeros between taps
        newh = np.zeros((1, n+(n-1)*(2**i-1)))
        newh[0, np.int_(np.linspace(0, np.size(newh)-1, len(h)))] = h
        H = np.dot(newh.T, newh)

        ###### compute c(j+1): line then column convolution
        if convol2d == 1:
            cnew = cp.convolve2d(c, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(c, newh[0,:], axis = 0, mode = mode)
        # NOTE(review): this column convolution runs in BOTH branches, so the
        # convol2d path filters the columns twice; iuwt mirrors the same
        # structure, which keeps the transform/reconstruction pair consistent.
        cnew = sc.convolve1d(cnew, newh[0,:], axis = 1, mode = mode)

        if newwave == 1:
            ###### second generation: detail = c_j - h*(h*c_{j+1})
            if convol2d == 1:
                hc = cp.convolve2d(cnew, H, mode='same', boundary='symm')
            else:
                hc = sc.convolve1d(cnew, newh[0,:], axis = 0, mode = mode)
            hc = sc.convolve1d(hc, newh[0,:], axis = 1, mode = mode)
            wave[i,:,:] = c-hc
        else:
            ###### first generation: detail = c_j - c_{j+1}
            wave[i,:,:] = c-cnew
        c = cnew

    # coarse scale
    wave[lvl,:,:] = c
    return wave
def iuwt(wave, convol2d = 0):
    """Inverse of the second-generation starlet transform: starting from the
    coarse scale, repeatedly smooth with the dilated filter and add the next
    detail scale.

    Bug fix: the reconstruction loop iterated over np.linspace floats, which
    are invalid array indices under modern NumPy; an integer range() is used
    instead.
    """
    mode = 'nearest'
    lvl, n1, n2 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
    n = np.size(h)
    cJ = np.copy(wave[lvl-1,:,:])
    for i in range(1, lvl):
        # filter dilation matching scale lvl-1-i of the forward transform
        newh = np.zeros((1, n+(n-1)*(2**(lvl-1-i)-1)))
        newh[0, np.int_(np.linspace(0, np.size(newh)-1, len(h)))] = h
        H = np.dot(newh.T, newh)
        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(cJ, newh[0,:], axis = 0, mode = mode)
        ###### Column convolution (runs in both branches, mirroring wave_transform)
        cnew = sc.convolve1d(cnew, newh[0,:], axis = 1, mode = mode)
        cJ = cnew + wave[lvl-1-i,:,:]
    return np.reshape(cJ, (n1, n2))
def plot_cube(cube):
    ##DESCRIPTION:
    ##    Plotting device that displays layers of a cube in different subplot panels.
    ##
    ##INPUTS:
    ##    -cube: cube with shape (n,n1,n2): n layers of n1xn2 pixels.
    ##
    ##OUTPUTS:
    ##    -None
    n,n1,n2 = np.shape(cube)
    # Bug fix: rows must be ceil(n/2) as an integer (n/2 is a float under
    # Python 3 and the old "+0.5" comparison could never be true), and
    # matplotlib subplot indices are 1-based (index 0 raises).
    rows = (n + 1) // 2
    cols = 2
    for k in range(n):
        plt.subplot(rows, cols, k + 1)
        plt.imshow(cube[k,:,:]); plt.colorbar()
    return None
| 7,049 | 26.539063 | 131 | py |
SLIT | SLIT-master/build/lib/SLIT/Lens.py | import numpy as np
import matplotlib.pyplot as plt
import pyfits as pf
import scipy.signal as scp
import warnings
warnings.simplefilter("ignore")
#Tool box for lensing
def SIS(x0,y0,n1,n2,Re):
    """Convergence (kappa) map of a Singular Isothermal Sphere with Einstein
    radius Re, centred on (x0, y0), over an (n1, n2) pixel grid.

    Improvements: the O(n1*n2) Python pixel loop is replaced by a vectorised
    computation, and the division by zero at the centre is silenced before the
    non-finite central value is patched to 1 (same result as before).
    """
    x, y = np.indices((n1, n2))
    with np.errstate(divide='ignore', invalid='ignore'):
        kappa = Re/(2*np.sqrt((x-x0)**2+(y-y0)**2))
    if np.isfinite(kappa[x0,y0]) == False:
        kappa[x0,y0] = 1
    return kappa
def SIE_xy(x,y,x0,y0,b,beta,q,xc,theta):
    """Convergence of a Singular Isothermal Ellipsoid evaluated at position(s)
    (x, y).

    x0, y0: profile centre; b: normalisation (Einstein-radius-like scale);
    beta: power-law slope; q: axis ratio; xc: core radius; theta: position
    angle in degrees. The unused 'count' local was removed.
    """
    eps = (1-q**2)/(1+q**2)
    up = b**(beta-1)
    pre = up/(2*(1-eps)**((beta-1)/2))
    theta = theta*np.pi/180.  # degrees -> radians
    # rotate coordinates into the ellipse frame
    Xr = (x-x0)*np.cos(theta)-(y-y0)*np.sin(theta)
    Yr = (x-x0)*np.sin(theta)+(y-y0)*np.cos(theta)
    kappa = pre/((xc**2.)/(1.-eps)+(Xr)**2.+((Yr)**2.)/q**2.)**((beta-1.)/2.)
    return kappa
def SIE(x0,y0,n1,n2,b,beta,q,xc,theta):
    """Evaluate the SIE convergence profile on a full (n1, n2) pixel grid."""
    row_idx, col_idx = np.indices((n1, n2))
    return SIE_xy(row_idx, col_idx, x0, y0, b, beta, q, xc, theta)
def alpha_def(kappa, n1,n2,x0,y0,extra):
#Computes the deflection angle of a single photon at coordinates theta in the source plane and a lens
#mass distribution kappa
nk1,nk2 = np.shape(kappa)
x0+=extra/2.
y0+=extra/2.
#Coordonnees de la grille de l'espace image
[x,y] = np.where(np.zeros([nk1,nk2])==0)
[xk,yk] = np.where(np.zeros([nk1,nk2])==0)
xc = np.reshape((x)-x0,(nk1,nk2))
yc = np.reshape((y)-y0,(nk1,nk2))
xkc = np.reshape((xk)-(nk1/2.),(nk1,nk2))
ykc = np.reshape((yk)-(nk2/2.),(nk1,nk2))
r = (xc**2+yc**2)
rk = np.sqrt(xkc**2+ykc**2)
lx,ly = np.where(r==0)
tabx = np.reshape((xc)/r,(nk1,nk2))
taby = np.reshape((yc)/r,(nk1,nk2))
tabx[lx,ly]=0
taby[lx,ly]=0
l = 0
kappa = kappa.astype(float)
tabx = tabx.astype(float)
# kappa[rk>(nk1)/2.] = 0
intex = scp.fftconvolve(tabx, (kappa), mode = 'same')/np.pi
intey = scp.fftconvolve(taby, (kappa), mode = 'same')/np.pi
return intex[x0-(n1)/2.:x0+(n1)/2,y0-(n2)/2.:y0+(n2)/2.],intey[x0-(n1)/2.:x0+(n1)/2,y0-(n2)/2.:y0+(n2)/2.]
def beta(kappa,theta):
    #Computes the source-plane position via the lens equation: beta = theta - alpha.
    # NOTE(review): 'alpha' is not defined anywhere in this module (only
    # alpha_def exists, with a different signature), so calling this function
    # raises NameError -- confirm the intended deflection routine.
    beta = theta-alpha(theta,kappa)
    return beta
def theta(alpha, beta):
    """Image-plane position from the lens equation: theta = beta + alpha.

    Bug fix: the original computed theta but returned 'beta' (its comment,
    copy-pasted from beta(), also claimed it computed beta).
    """
    theta = beta+alpha
    return theta
def F(kappa, nt1,nt2, size, x0, y0, extra=100, x_shear = 0, y_shear = 0, alpha_x_in = [-99], alpha_y_in = [-99]):
    """Build the lens mapping: for every source-plane pixel, the (shifted)
    coordinates of the image-plane pixels its light is deflected to, or [0]
    when no pixel maps there.

    The deflection field is taken from alpha_x_in/alpha_y_in when provided
    (the default [-99] acts as a sentinel); otherwise it is computed from
    kappa via alpha_def and shifted by (x_shear, y_shear). Bug fix: np.float
    was removed from NumPy (>=1.24); the builtin float is used instead, and
    the sentinel test compares against the scalar -99 rather than a list.
    """
    # Theta positions for each pixel in beta
    if np.sum(alpha_x_in) != -99:
        alpha_x = alpha_x_in
        alpha_y = alpha_y_in
    else:
        alpha_x,alpha_y = alpha_def(kappa,nt1,nt2,x0,y0,extra)
        alpha_x = alpha_x+x_shear
        alpha_y = alpha_y+y_shear
    na1,na2 = np.shape(alpha_x)
    xa,ya = np.where(np.zeros((na1,na2)) == 0)
    nb1 = nt1*size
    nb2 = nt2*size
    xb, yb = np.where(np.zeros((nb1,nb2))==0)
    #Scaling of the deflection grid onto the image grid
    xa = xa*(float(nt1)/float(na1))
    ya = ya*(float(nt2)/float(na2))
    #Setting images coordinates in 2d
    xa2d = np.reshape(xa,(na1,na2))
    ya2d = np.reshape(ya,(na1,na2))
    F2 = []
    for i in range(np.size(xb)):
        #Deflection of photons emitted in xb[i],yb[i]
        theta_x = (xb[i])*(float(nt1)/float(nb1))+alpha_x
        theta_y = (yb[i])*(float(nt2)/float(nb2))+alpha_y
        #Matching of arrivals with pixels in image plane (within half a pixel)
        xprox = np.int_(np.abs((xa2d-theta_x)*2))
        yprox = np.int_(np.abs((ya2d-theta_y)*2))
        if np.min(xprox+yprox) < 1.5:
            loc2 = np.where((xprox+yprox)==np.min(xprox+yprox))
        else:
            loc2 = []
        if (np.size(loc2)==0):
            F2.append([0])
        else:
            F2.append((np.array(loc2))-1)
    return F2
def source_to_image(Source, nt1,nt2, theta, ones = 1):
    # Projects a source-plane image onto the image plane using the lens mapping.
    # Source: image of the source in the source plane.
    # nt1,nt2: size in pixels of the postage stamp in image plane.
    # theta: the lens mapping (list of image-plane coordinate arrays per source pixel).
    # ones: when 1, normalise by the projection of an all-ones source so that
    #       multiply-hit image pixels are averaged rather than summed.
    F = (theta)
    nb1,nb2 = np.shape(Source)
    if ones == 1:
        # recursive call builds the per-pixel hit-count normalisation map
        onelens = source_to_image(np.ones(Source.shape), nt1,nt2, theta, ones = 0)
        onelens[np.where(onelens==0)]=1
    else:
        onelens = 1.
    Image = np.zeros((nt1,nt2))
    xb,yb = np.where(np.zeros((nb1,nb2)) == 0)
    N = np.size(xb)
    k=0
    for pos in F:
        # entries of size-1 shape are the [0] placeholders for unmapped pixels
        if np.size(np.shape(pos)) != 1:
            # NOTE(review): '+=' with fancy indexing does NOT accumulate over
            # duplicate indices (NumPy buffering), and the x/y out-of-range
            # filters are applied independently, which can mispair coordinates
            # when only one axis is out of range -- confirm intent.
            Image[np.array(pos[0][np.where((pos[0][:]<nt1))]),
                  np.array(pos[1][np.where(pos[1][:]<nt2)])] += Source[xb[k],yb[k]]#fullSource
        k=k+1
    return Image/onelens
def image_to_source(Image, size,beta,lensed = 0, square = 0):
    # Projects an image-plane postage stamp back onto the source plane.
    # Image: postage stamp of the observed image.
    # size: scale factor of the source grid relative to the image grid.
    # beta: the lens mapping (list of image-plane coordinate arrays per source pixel).
    # lensed: when non-zero, average the contributions of multiply-mapped pixels.
    # square: when 1, combine contributions in quadrature (used to propagate
    #         noise standard deviations instead of fluxes).
    F = (beta)
    nt1,nt2 = np.shape(Image)
    nb1 = nt1*size
    nb2 = nt2*size
    Source = np.zeros((nb1,nb2))
    xb,yb = np.where(Source == 0)
    N = np.size(xb)
    for k in range(N):
        pos = F[k]
        # entries of size-1 shape are the [0] placeholders for unmapped pixels
        if np.size(np.shape(pos)) > 1:
            if np.sum(lensed) !=0:
                if square == 0:
                    # mean of the image pixels that map to this source pixel
                    Source[xb[k],yb[k]] += np.sum(Image[np.array(pos[0][:]),
                                                    np.array(pos[1][:])])/np.max([1,np.size(pos[0][:])])
                else:
                    # quadrature sum of the (averaged) contributions
                    Source[xb[k], yb[k]] += np.sum((Image[np.array(pos[0][:]),
                                                        np.array(pos[1][:])] / np.max([1, np.size(pos[0][:])]))**2)
            else:
                # plain sum of the mapped image pixels
                Source[xb[k],yb[k]] += np.sum(Image[np.array(pos[0][:]),
                                                    np.array(pos[1][:])])
    if square == 1:
        Source = np.sqrt(Source)
    return Source
def image_to_source_bound(Image, size,beta,lensed = 0):
    # Same projection as image_to_source (without the 'square' option); used
    # to compute the support/footprint of the image plane in source plane.
    # Image: postage stamp of the observed image.
    # size: scale factor of the source grid relative to the image grid.
    # beta: the lens mapping (list of image-plane coordinate arrays per source pixel).
    # lensed: when non-zero, average the contributions of multiply-mapped pixels.
    F = (beta)
    nt1,nt2 = np.shape(Image)
    nb1 = nt1*size
    nb2 = nt2*size
    Source = np.zeros((nb1,nb2))
    xb,yb = np.where(Source == 0)
    N = np.size(xb)
    for k in range(N):
        pos = F[k]
        # entries of size-1 shape are the [0] placeholders for unmapped pixels
        if np.size(np.shape(pos)) > 1:
            if np.sum(lensed) !=0:
                # mean of the image pixels that map to this source pixel
                Source[xb[k],yb[k]] += np.sum(Image[np.array(pos[0][:]),
                                            np.array(pos[1][:])])/np.max([1,np.size(pos[0][:])])
            else:
                # plain sum of the mapped image pixels
                Source[xb[k],yb[k]] += np.sum(Image[np.array(pos[0][:]),
                                            np.array(pos[1][:])])
    return Source
| 7,136 | 28.987395 | 123 | py |
SLIT | SLIT-master/build/lib/SLIT/__init__.py | from Solve import *
import Lens
import wave_transform
import tools
| 67 | 12.6 | 21 | py |
eEVM | eEVM-main/3rdparty/intx/test/fuzzer/decode.py | #!/usr/bin/env python3
import os
import sys
# When non-empty, only the operators listed here are decoded/printed.
ops_filter = ()
# Operators indexed by the first byte of a fuzzer input; 's/' is signed division.
ops = ('/', '*', '<<', '>>', '+', '-', 's/')
def err(*args, **kwargs):
    """Print to standard error; same interface as print()."""
    print(*args, file=sys.stderr, **kwargs)
def decode_file(file):
    """Open *file* in binary mode, announce it, and decode its contents as a
    fuzzer test case."""
    with open(file, 'rb') as handle:
        print("Decoding {}".format(file))
        payload = handle.read()
    decode_data(payload)
def decode_data(data):
    """Decode a raw fuzzer input: byte 0 selects the operator from `ops`, the
    remaining bytes split into two big-endian operands, and for division
    operators the expected quotient/remainder test vector is printed.

    Improvement: a zero divisor (a perfectly legal fuzzer input) previously
    crashed the script with ZeroDivisionError; it is now reported instead.
    """
    arg_size = (len(data) - 1) // 2
    if arg_size not in (16, 32, 64):
        err("Incorrect argument size: {}".format(arg_size))
        return

    op_index = int(data[0])
    if op_index >= len(ops):
        return
    op = ops[op_index]

    if ops_filter and op not in ops_filter:
        return

    x = int.from_bytes(data[1:1 + arg_size], byteorder='big')
    y = int.from_bytes(data[1 + arg_size:], byteorder='big')

    print("argument size: {}".format(arg_size))
    print(x, op, y)
    print(hex(x), op, hex(y))

    if op in ('/', 's/'):
        if y == 0:
            err("division by zero operand; no test vector emitted")
            return
        print("Test:")
        print("{")
        print("    {}_u512,".format(hex(x)))
        print("    {}_u512,".format(hex(y)))
        print("    {}_u512,".format(hex(x // y)))
        print("    {}_u512,".format(hex(x % y)))
        print("},")

        if op == 's/':
            # two's-complement (512-bit) negation of both operands
            ax = (-x) % 2**512
            ay = (-y) % 2**512
            print("Test:")
            print("{")
            print("    {}_u512,".format(hex(ax)))
            print("    {}_u512,".format(hex(ay)))
            print("    {}_u512,".format(hex(ax // ay)))
            print("    {}_u512,".format(hex(ax % ay)))
            print("},")
# Command-line entry: decode a single file, or every file under a directory.
# Improvement: the original used 'assert' for input validation, which is
# silently stripped under 'python -O'; report usage explicitly instead.
if len(sys.argv) < 2:
    err("usage: decode.py <file-or-directory>")
    sys.exit(1)
path = sys.argv[1]
if os.path.isfile(path):
    decode_file(path)
else:
    for root, _, files in os.walk(path):
        for file in files:
            decode_file(os.path.join(root, file))
| 1,690 | 22.486111 | 61 | py |
frodo | frodo-main/setup.py | from setuptools import setup, find_packages
install_requires=[
'flask',
'rdflib',
'nltk',
'shortuuid',
'fredclient @ git+https://github.com/anuzzolese/fredclient'
]
setup(name='frodo', version='1.0.0',
packages=find_packages(), install_requires=install_requires)
| 284 | 20.923077 | 64 | py |
frodo | frodo-main/demo/frodo_webapp.py | from flask import Flask, Response, render_template, request
from frodo import Frodo
import shortuuid
import webapp_conf
# Flask application object; the routes below are registered against it.
myapp = Flask(__name__)
@myapp.route("/")
def index():
    """Serve the landing page, or a downloadable Turtle ontology when a
    'text' query parameter (a competency question) is supplied."""
    text = request.args.get("text")
    ontology_id = request.args.get("ontology-id")
    if not text:
        return render_template("index.html", basepath=webapp_conf.BASEPATH)
    print(text)
    # Single shared namespace for all generated ontologies.
    frodo = Frodo(
        namespace=webapp_conf.NS,
        fred_uri=webapp_conf.FRED_ENDPOINT,
        fred_key=webapp_conf.FRED_KEY
    )
    turtle = frodo.generate(text, ontology_id).serialize(format='text/turtle')
    return Response(
        turtle,
        mimetype='text/turtle',
        headers={"Content-disposition":
                     "attachment; filename=ontology.ttl"})
# Run the development server when executed directly (not under a WSGI host).
if __name__ == '__main__':
    myapp.run(port=webapp_conf.PORT)
| 972 | 28.484848 | 77 | py |
frodo | frodo-main/demo/webapp_conf.py | import shortuuid
# FRED machine-reading service endpoint used by the demo.
FRED_ENDPOINT = 'http://wit.istc.cnr.it/stlab-tools/fred'
# API key for the FRED service (empty: anonymous / to be filled by deployer).
FRED_KEY = ''
# Base namespace for the generated ontologies.
NS = 'https://w3id.org/stlab/ontology/'
# Local port for the Flask development server.
PORT = 5555
# Base path used when rendering template links.
BASEPATH = './'
# Restrict shortuuid's alphabet so generated ids are lowercase/digits only.
shortuuid.set_alphabet("0123456789abcdefghijkmnopqrstuvwxyz")
| 220 | 23.555556 | 61 | py |
frodo | frodo-main/demo/demo.py | from frodo import Frodo
import shortuuid
import webapp_conf
#shortuuid.set_alphabet("0123456789abcdefghijkmnopqrstuvwxyz")
# Competency question the demo turns into an ontology draft.
sentence = 'What cars cost more than $50,000?'
# Per-sentence namespace: base NS + 8-char deterministic id of the sentence.
namespace = ''.join([webapp_conf.NS, shortuuid.uuid(sentence)[:8], '/'])
frodo = Frodo(
    namespace=namespace,
    fred_uri=webapp_conf.FRED_ENDPOINT,
    fred_key=webapp_conf.FRED_KEY
)
#sentence = 'What are the contaminated sites in the area of Pavia recorded in 2020?'
#sentence = 'Who well commissioned a cultural property at a certain time?'
# Generate the ontology draft and dump it as Turtle to stdout.
ontology = frodo.generate(sentence)
print(ontology.serialize(format='text/turtle'))
| 609 | 26.727273 | 84 | py |
frodo | frodo-main/frodo/taxonomic_queries.py | update = {
'alias_instance': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?alias_instance a fred:Alias;\n fred:aliasOf ?instance.\n\n ?s ?p ?o. # remove all statements about the fred:Alias class\n}\nINSERT {\n ?alias_instance owl:sameAs ?instance.\n}\nWHERE {\n OPTIONAL {\n ?alias_instance a fred:Alias;\n fred:aliasOf ?instance.\n\n FILTER(STRSTARTS(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?alias_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Alias || ?p = fred:aliasOf || ?o = fred:Alias).\n}\n',
'declaration': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX boxing: <http://www.ontologydesignpatterns.org/ont/boxer/boxing.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance ?p ?o.\n}\nINSERT {\n [ a ?sub_class].\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n # OPTIONAL {\n ?instance a fred:Topic;\n boxing:declaration/rdf:type ?super_class.\n\n ?instance ?p ?o.\n FILTER(STRSTARTS(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n\n BIND(?instance AS ?sub_class).\n # }\n}\n',
'genre_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance a fred:Genre;\n fred:genreOf ?super_class_instance.\n\n ?s ?p ?o.\n}\nINSERT {\n ?instance a ?super_class.\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n OPTIONAL {\n ?instance a fred:Genre.\n\n OPTIONAL {\n ?instance a ?sub_class.\n FILTER(?sub_class != fred:Genre).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n }\n\n ?instance fred:genreOf ?super_class_instance.\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n OPTIONAL {\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n }\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Genre || ?p = fred:genreOf || ?o = fred:Genre).\n}\n',
'kind_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance a fred:Kind;\n fred:kindOf ?super_class_instance.\n\n ?s ?p ?o.\n}\nINSERT {\n ?instance a ?super_class.\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n OPTIONAL {\n ?instance a fred:Kind.\n\n OPTIONAL {\n ?instance a ?sub_class.\n FILTER(?sub_class != fred:Kind).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n }\n\n ?instance fred:kindOf ?super_class_instance.\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n OPTIONAL {\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n }\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Kind || ?p = fred:kindOf || ?o = fred:Kind).\n}\n',
'name_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance a fred:Name;\n fred:nameOf ?super_class_instance.\n\n ?s ?p ?o.\n}\nINSERT {\n ?instance a ?super_class.\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n OPTIONAL {\n ?instance a fred:Name.\n\n OPTIONAL {\n ?instance a ?sub_class.\n FILTER(?sub_class != fred:Name).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n }\n\n ?instance fred:nameOf ?super_class_instance.\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n OPTIONAL {\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n }\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Name || ?p = fred:nameOf || ?o = fred:Name).\n}\n',
'quantity_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance a fred:Quantity;\n ?typeOf_property ?super_class_instance.\n\n ?s ?p ?o.\n}\nINSERT {\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n OPTIONAL {\n ?instance a fred:Quantity, ?sub_class.\n FILTER(STRSTARTS(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n\n ?instance ?typeOf_property ?super_class_instance.\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?typeOf_property), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(?typeOf_property = IRI(CONCAT("http://www.ontologydesignpatterns.org/ont/fred/", LCASE(REPLACE(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/", "")), "Of"))).\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Quantity || ?o = fred:Quantity).\n}\n',
'series_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance a fred:Series;\n ?typeOf_property ?super_class_instance.\n\n ?s ?p ?o.\n}\nINSERT {\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n OPTIONAL {\n ?instance a fred:Series, ?sub_class.\n FILTER(STRSTARTS(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n\n ?instance ?typeOf_property ?super_class_instance.\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?typeOf_property), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(?typeOf_property = IRI(CONCAT("http://www.ontologydesignpatterns.org/ont/fred/", LCASE(REPLACE(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/", "")), "Of"))).\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Series || ?o = fred:Series).\n}\n',
'species_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance a fred:Species;\n ?typeOf_property ?super_class_instance.\n\n ?s ?p ?o.\n}\nINSERT {\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n OPTIONAL {\n ?instance a fred:Species, ?sub_class.\n FILTER(STRSTARTS(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n\n ?instance ?typeOf_property ?super_class_instance.\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?typeOf_property), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(?typeOf_property = IRI(CONCAT("http://www.ontologydesignpatterns.org/ont/fred/", LCASE(REPLACE(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/", "")), "Of"))).\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Species || ?o = fred:Species).\n}\n',
'type_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance ?typeOf_property ?super_class_instance.\n}\nINSERT {\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n # OPTIONAL {\n ?instance a ?sub_class.\n FILTER(STRSTARTS(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n\n ?instance ?typeOf_property ?super_class_instance.\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?typeOf_property), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(?typeOf_property = IRI(CONCAT("http://www.ontologydesignpatterns.org/ont/fred/", LCASE(REPLACE(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/", "")), "Of"))).\n # }\n\n}\n',
'variety_of': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\nPREFIX fred: <http://www.ontologydesignpatterns.org/ont/fred/>\n\nDELETE {\n ?instance a fred:Variety;\n fred:varietyOf ?super_class_instance.\n\n ?s ?p ?o.\n}\nINSERT {\n ?instance a ?super_class.\n ?sub_class rdfs:subClassOf ?super_class.\n}\nWHERE {\n OPTIONAL {\n ?instance a fred:Variety.\n\n OPTIONAL {\n ?instance a ?sub_class.\n FILTER(?sub_class != fred:Variety).\n FILTER(STRSTARTS(STR(?sub_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n MINUS {\n ?sub_class rdfs:subClassOf+ dul:Event.\n }\n }\n\n ?instance fred:varietyOf ?super_class_instance.\n FILTER(STRSTARTS(STR(?super_class_instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n OPTIONAL {\n ?super_class_instance a ?super_class.\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n }\n }\n\n # Clean up the knowledge graph\n ?s ?p ?o.\n FILTER(?s = fred:Variety || ?p = fred:varietyOf || ?o = fred:Variety).\n}\n',
}
construct = {
'fred_taxonomy': 'PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>\n\nCONSTRUCT {\n ?domain_class_FrODO a owl:Class;\n rdfs:label ?domain_class_label;\n rdfs:subClassOf ?super_class_FrODO;\n owl:equivalentClass ?equiv_class_FrODO.\n\n ?super_class_FrODO a owl:Class;\n rdfs:label ?super_class_label;\n rdfs:subClassOf ?ancestor_class_FrODO.\n\n ?equiv_class_FrODO a owl:Class;\n rdfs:label ?equiv_class_label.\n\n ?ancestor_class_FrODO a owl:Class;\n rdfs:label ?ancestor_class_label.\n}\nWHERE {\n ?instance a ?domain_class.\n FILTER(!STRENDS(STR(?domain_class), "Disjunct")). # remove this when the disjunction pattern is implemented\n FILTER(STRSTARTS(STR(?domain_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n BIND(STRLANG(REPLACE(STR(?domain_class), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?domain_class_label).\n BIND(IRI(REPLACE(STR(?domain_class), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?domain_class_FrODO).\n\n MINUS {\n ?domain_class rdfs:subClassOf+ dul:Event.\n }\n\n OPTIONAL {\n ?domain_class rdfs:subClassOf+ ?super_class.\n FILTER(STRSTARTS(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n BIND(STRLANG(REPLACE(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?super_class_label).\n BIND(IRI(REPLACE(STR(?super_class), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?super_class_FrODO).\n }\n\n OPTIONAL {\n ?domain_class owl:equivalentClass+ ?equiv_class.\n FILTER(STRSTARTS(STR(?equiv_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n BIND(STRLANG(REPLACE(STR(?equiv_class), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?equiv_class_label).\n BIND(IRI(REPLACE(STR(?equiv_class), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?equiv_class_FrODO).\n }\n\n OPTIONAL {\n ?super_class 
rdfs:subClassOf+ ?ancestor_class.\n FILTER(STRSTARTS(STR(?ancestor_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n BIND(STRLANG(REPLACE(STR(?ancestor_class), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?ancestor_class_label).\n BIND(IRI(REPLACE(STR(?ancestor_class), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?ancestor_class_FrODO).\n }\n}\n',
'sameas_instance': 'CONSTRUCT {\n ?instance_FrODO a owl:NamedIndividual, ?instance_class_FrODO;\n rdfs:label ?instance_label;\n owl:sameAs ?alias_FrODO.\n\n ?alias_FrODO a owl:NamedIndividual, ?alias_class_FrODO;\n rdfs:label ?alias_label;\n owl:sameAs ?instance_FrODO.\n\n ?instance_class_FrODO a owl:Class;\n rdfs:label ?instance_class_label.\n\n ?alias_class_FrODO a owl:Class;\n rdfs:label ?alias_class_label.\n}\nWHERE {\n ?alias owl:sameAs ?instance.\n FILTER(STRSTARTS(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/")).\n FILTER(STRSTARTS(STR(?alias), "http://www.ontologydesignpatterns.org/ont/fred/")).\n\n BIND(STRLANG(REPLACE(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?instance_label).\n BIND(IRI(REPLACE(STR(?instance), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?instance_FrODO).\n\n BIND(STRLANG(REPLACE(STR(?alias), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?alias_label).\n BIND(IRI(REPLACE(STR(?alias), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?alias_FrODO).\n\n OPTIONAL {\n ?instance a ?instance_class.\n FILTER(STRSTARTS(STR(?instance_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n BIND(STRLANG(REPLACE(STR(?instance_class), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?instance_class_label).\n BIND(IRI(REPLACE(STR(?instance_class), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?instance_class_FrODO).\n }\n\n OPTIONAL {\n ?alias a ?alias_class.\n FILTER(STRSTARTS(STR(?alias_class), "http://www.ontologydesignpatterns.org/ont/fred/")).\n BIND(STRLANG(REPLACE(STR(?alias_class), "http://www.ontologydesignpatterns.org/ont/fred/", ""), "en") AS ?alias_class_label).\n BIND(IRI(REPLACE(STR(?alias_class), "http://www.ontologydesignpatterns.org/ont/fred/", "https://w3id.org/stlab/ontology/")) AS ?alias_class_FrODO).\n 
}\n}\n',
}
| 16,514 | 916.5 | 2,617 | py |
frodo | frodo-main/frodo/frodo.py | import os
import re
from fredclient import FREDClient, FREDParameters, FREDDefaults
import nltk
from nltk.stem import WordNetLemmatizer
from rdflib import RDFS, RDF, OWL, XSD, URIRef, Literal, BNode, Graph, Namespace
from rdflib.paths import evalPath, OneOrMore
from abc import ABC, abstractmethod
from typing import List, Dict, NoReturn, Tuple
from rdflib.term import URIRef
import frodo.taxonomic_queries as taxonomic_queries
# Module-import side effect: make sure the WordNet corpus required by
# WordNetLemmatizer is present locally (no-op if already downloaded).
nltk.download('wordnet')
class MorphUtils:
    """Static helpers shared by the morphisms: IRI namespace/local-name
    splitting, rdfs:label generation, recursive taxonomy migration from
    the FRED namespace into the target ontology namespace, inverse
    object-property naming, and gerundive class names for event frames."""

    # Shared WordNet lemmatizer used by gerundify(); relies on the
    # 'wordnet' corpus downloaded at module import time.
    LEMMATIZER: WordNetLemmatizer = WordNetLemmatizer()

    @staticmethod
    def get_namespace(uriref: URIRef) -> str:
        """Return the namespace of *uriref*: everything up to and including
        the last '#' or '/', whichever occurs later."""
        uriref_str = str(uriref)
        last_hash = uriref_str.rfind('#')
        last_slash = uriref_str.rfind('/')
        return uriref_str[:last_hash+1] if last_hash > last_slash else uriref_str[:last_slash+1]

    @staticmethod
    def get_id(uriref: URIRef) -> str:
        """Return the local name of *uriref*: the part after the last '#'
        or '/', whichever occurs later."""
        uriref_str = str(uriref)
        last_hash = uriref_str.rfind('#')
        last_slash = uriref_str.rfind('/')
        return uriref_str[last_hash+1:] if last_hash > last_slash else uriref_str[last_slash+1:]

    @staticmethod
    def labelize_uriref(uriref: URIRef, lang: str = None, datatype: URIRef = None) -> Literal:
        """Build a label literal from the CamelCase local name of *uriref*
        (e.g. 'CulturalProperty' -> 'cultural property').

        *lang* and *datatype* are alternatives; *lang* takes precedence.
        """
        ns = MorphUtils.get_namespace(uriref)
        uriref_str = str(uriref)
        term_id = uriref_str.replace(ns, '')
        # Insert a space before each inner capital, then lower-case the tail.
        label = term_id[0:1] + re.sub('([A-Z]{1})', r' \1', term_id[1:]).lower()
        if lang:
            return Literal(label, lang=lang)
        if datatype:
            # Fix: datatype must be passed by keyword -- the second positional
            # parameter of rdflib.Literal is the language tag, so the previous
            # positional call put the datatype IRI into `lang`.
            return Literal(label, datatype=datatype)
        return Literal(label)

    @staticmethod
    def migrate_taxonomy(g: Graph, source_cls: URIRef, target_cls: URIRef, gerundify: bool = False, predicate: URIRef = RDFS.subClassOf) -> Graph:
        """Recursively copy the taxonomy above *source_cls* in *g* onto
        *target_cls* in the target ontology namespace.

        Only FRED-namespace ancestors are migrated; each migrated class gets
        an owl:Class assertion and an English label. When *gerundify* is
        True, migrated local names are turned into gerundives (event
        frames). Note: the recursion always follows rdfs:subClassOf even if
        the first hop used a different *predicate* (e.g. rdf:type).

        Fix: the return annotation was NoReturn although a Graph is returned.
        """
        ontology = Graph()
        ns = MorphUtils.get_namespace(target_cls)
        for subclass in g.objects(source_cls, predicate):
            if str(subclass).startswith(FREDDefaults.DEFAULT_FRED_NAMESPACE):
                if gerundify:
                    sc = URIRef(ns + MorphUtils.gerundify(subclass))
                else:
                    sc = URIRef(ns + MorphUtils.get_id(subclass))
                ontology.add((target_cls, RDFS.subClassOf, sc))
                ontology.add((sc, RDF.type, OWL.Class))
                label = MorphUtils.labelize_uriref(sc, 'en')
                ontology.add((sc, RDFS.label, label))
                ontology += MorphUtils.migrate_taxonomy(g, subclass, sc, gerundify)
        return ontology

    @staticmethod
    def inverse(predicate: URIRef) -> URIRef:
        """Derive the IRI of the inverse of *predicate*:
        'involvesCar' -> 'isCarInvolvedIn'; otherwise 'fooBar' -> 'isFooBarOf'."""
        ns = MorphUtils.get_namespace(predicate)
        predicate_id = predicate.replace(ns, '')
        add_of = False
        if predicate_id.startswith('involves'):
            predicate_id = predicate_id.replace('involves', '') + 'InvolvedIn'
        else:
            add_of = True
        # Fix: str.capitalize() lower-cases everything after the first
        # character, mangling CamelCase ids ('CarInvolvedIn' ->
        # 'Carinvolvedin'); only upper-case the first character instead.
        inverse_predicate_id = 'is' + predicate_id[:1].upper() + predicate_id[1:]
        if add_of:
            inverse_predicate_id += 'Of'
        return URIRef(ns + inverse_predicate_id)

    @staticmethod
    def gerundify(term: URIRef) -> str:
        """Turn the verb in the local name of *term* into its gerundive
        ('Buy' -> 'Buying', 'CarBuy' -> 'CarBuying').

        Fix: the return annotation was Graph although a str is returned.
        """
        class_label = MorphUtils.get_id(term)
        class_label_terms = class_label[0:1] + re.sub('([A-Z]{1})', r' \1', class_label[1:])
        class_label_terms = class_label_terms.split()
        # Lemmatize only the last (verb) token of the CamelCase name.
        lemma = MorphUtils.LEMMATIZER.lemmatize(class_label_terms[-1], 'v')
        isVowel = lambda c: c.lower() in 'aeiou'
        isConsonant = lambda c: c.lower() not in 'aeiou'
        if len(lemma) >= 3 and \
                isVowel(lemma[-3]) and isVowel(lemma[-2]) and isConsonant(lemma[-1]):
            # e.g. 'speak' -> 'speaking'
            gerundive = f'{lemma}ing'
        elif len(lemma) >= 3 and \
                isConsonant(lemma[-3]) and isVowel(lemma[-2]) and isConsonant(lemma[-1]):
            # TODO: Verbs of this kind whose last syllable is not stressed are not
            # subject to a doubling of the final consonant!
            # (e.g. 'render' -> 'rendering')
            # e.g. 'run' -> 'running'
            gerundive = f'{lemma}{lemma[-1]}ing'
        elif lemma.endswith('ic'):
            # e.g. panic -> panicking
            gerundive = f'{lemma}king'
        elif lemma.endswith('ee'):
            # e.g. 'see' -> 'seeing'
            gerundive = f'{lemma}ing'
        elif lemma.endswith('ie'):
            # e.g. 'die' -> 'dying'
            gerundive = f'{lemma[:-2]}ying'
        elif lemma.endswith('e'):
            # e.g. 'make' -> 'making'
            gerundive = f'{lemma[:-1]}ing'
        else:
            # e.g. 'study' -> 'studying'
            gerundive = f'{lemma}ing'
        gerundive = gerundive.capitalize()
        return "".join(class_label_terms[:-1]) + gerundive
class MorphismI(ABC):
    """Abstract base for graph-to-ontology rewriting steps.

    Concrete morphisms receive the target ontology namespace at
    construction time and implement morph().
    """

    def __init__(self, ns: Namespace):
        """Remember the target ontology namespace for subclasses."""
        self._ns = ns

    @abstractmethod
    def morph(self, g: Graph) -> Graph:
        """Transform *g*; implemented by concrete morphisms."""
class TaxonomyMorphism(MorphismI):
    """Applies the taxonomic SPARQL rewritings (alias/kind/genre/... FRED
    patterns from taxonomic_queries) to the source graph and builds the
    taxonomy part of the ontology draft."""

    def morph(self, g: Graph) -> Tuple[Graph, Graph]:
        """Return the updated source graph and the taxonomy ontology.

        Fix: the return annotation was Graph although a 2-tuple is returned.
        """
        ontology = Graph()
        ontology.bind('owl', OWL)
        ontology.bind('rdf', RDF)
        # Fix: RDFS was previously bound to the 'rdf' prefix, clobbering the
        # RDF binding above and leaving 'rdfs' unbound in serialisations.
        ontology.bind('rdfs', RDFS)
        # Apply UPDATE queries to convert graph patterns to taxonomy axioms
        for update_query in taxonomic_queries.update:
            g = self._update_query(g, update_query)
        # Apply CONSTRUCT queries to generate the ontology draft
        for construct_query in taxonomic_queries.construct:
            ontology += self._construct_query(g, construct_query)
        return g, ontology

    def _update_query(self, g: Graph, query_name: str) -> Graph:
        """Run the named SPARQL UPDATE from taxonomic_queries on *g* in place."""
        sparql = taxonomic_queries.update[query_name]
        sparql = sparql.replace('$FRED_NS', FREDDefaults.DEFAULT_FRED_NAMESPACE)
        g.update(sparql)
        return g

    def _construct_query(self, g: Graph, query_name: str) -> Graph:
        """Run the named SPARQL CONSTRUCT and return the constructed graph."""
        sparql = taxonomic_queries.construct[query_name]
        sparql = sparql.replace('$FRED_NS', FREDDefaults.DEFAULT_FRED_NAMESPACE)
        sparql = sparql.replace('$FrODO_NS', self._ns) # ontology namespace
        sparql = sparql.replace('$LANG', 'en') # language for labels
        result = g.query(sparql)
        return result.graph
class BinaryRelationMorphism(MorphismI):
    """Turns binary FRED relations between typed individuals into OWL
    object properties (plus inverse properties and existential
    restrictions on the subject class) in the target namespace."""

    def morph(self, g: Graph) -> Tuple[Graph, Graph]:
        """Return the (unmodified) source graph and the ontology fragment
        derived from its binary relations.

        The SELECT gathers every FRED-namespace predicate between two
        resources together with each end's rdf:type (or the type reached via
        owl:sameAs), while excluding subjects whose type is (a subclass of)
        dul:Event -- those frames are handled by NAryRelationMorphism.
        """
        ontology = Graph()
        sparql = f'''
            PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>
            PREFIX owl: <{OWL._NS}>
            SELECT ?subj ?subjtype ?subjsameastype ?rel ?obj ?objtype ?objsameastype
            WHERE{{
                ?subj ?rel ?obj
                OPTIONAL{{
                    ?subj a ?subjtype
                    FILTER(REGEX(STR(?subjtype), '^{FREDDefaults.DEFAULT_FRED_NAMESPACE}'))
                }}
                OPTIONAL{{
                    ?obj a ?objtype
                    FILTER(REGEX(STR(?objtype), '^{FREDDefaults.DEFAULT_FRED_NAMESPACE}'))
                }}
                OPTIONAL{{
                    ?subj owl:sameAs/rdf:type ?subjsameastype
                }}
                OPTIONAL{{
                    ?obj owl:sameAs/rdf:type ?objsameastype
                }}
                OPTIONAL{{
                    ?subjtype rdfs:subClassOf ?subtype .
                    ?subtype rdfs:subClassOf* dul:Event}}
                FILTER(!BOUND(?subtype))
                FILTER(REGEX(STR(?rel), '^{FREDDefaults.DEFAULT_FRED_NAMESPACE}'))
            }}
        '''
        resultset = g.query(sparql)
        for row in resultset:
            subjtype = row.subjtype
            subjsameastype = row.subjsameastype
            binary_predicate = row.rel
            objtype = row.objtype
            objsameastype = row.objsameastype
            # Prefer the direct rdf:type; fall back to the sameAs alias type.
            if subjtype:
                subj = subjtype
            elif subjsameastype:
                subj = subjsameastype
            else:
                subj = None
            if objtype:
                obj = objtype
            elif objsameastype:
                obj = objsameastype
            else:
                obj = None
            # Only relations whose both ends have a usable class are modelled.
            if subj and obj:
                subject_id = MorphUtils.get_id(subj)
                predicate_id = MorphUtils.get_id(binary_predicate)
                object_id = MorphUtils.get_id(obj)
                # Mint domain class in the target namespace and copy its taxonomy.
                subject_class = URIRef(self._ns + subject_id)
                ontology.add((subject_class, RDF.type, OWL.Class))
                ontology.add((subject_class, RDFS.label, MorphUtils.labelize_uriref(subject_class, 'en')))
                ontology += MorphUtils.migrate_taxonomy(g, subj, subject_class)
                # Same for the range class.
                object_class = URIRef(self._ns + object_id)
                ontology.add((object_class, RDF.type, OWL.Class))
                ontology.add((object_class, RDFS.label, MorphUtils.labelize_uriref(object_class, 'en')))
                ontology += MorphUtils.migrate_taxonomy(g, obj, object_class)
                # Object property named <predicate><ObjectClass>, ranged on the
                # object class; domain deliberately left open as owl:Thing.
                object_property = URIRef("".join([self._ns, predicate_id, object_id]))
                ontology.add((object_property, RDF.type, OWL.ObjectProperty))
                ontology.add((object_property, RDFS.domain, OWL.Thing))
                ontology.add((object_property, RDFS.range, object_class))
                ontology.add((object_property, RDFS.label, MorphUtils.labelize_uriref(object_property, 'en')))
                # Derived inverse property with swapped domain/range.
                inverse_object_property = MorphUtils.inverse(object_property)
                ontology.add((inverse_object_property, RDF.type, OWL.ObjectProperty))
                ontology.add((inverse_object_property, RDFS.domain, object_class))
                ontology.add((inverse_object_property, RDFS.range, OWL.Thing))
                ontology.add((inverse_object_property, RDFS.label, MorphUtils.labelize_uriref(inverse_object_property, 'en')))
                # Existential restriction: SubjectClass subClassOf
                # (object_property some ObjectClass).
                restriction = BNode()
                ontology.add((restriction, RDF.type, OWL.Restriction))
                ontology.add((restriction, OWL.onProperty, object_property))
                ontology.add((restriction, OWL.someValuesFrom, object_class))
                ontology.add((subject_class, RDFS.subClassOf, restriction))
        return g, ontology
class RoleType:
    """Enumeration-like holder of the role categories FrODO distinguishes,
    plus a classifier from a role predicate IRI to its category."""

    PASSIVE = 'PASSIVE'
    AGENTIVE = 'AGENTIVE'
    CONDITIONAL_AGENTIVE = 'CONDITIONAL_AGENTIVE'
    OBLIQUE = 'OBLIQUE'
    FRED_ROLE = 'FRED_ROLE'

    @staticmethod
    def get_role_type(role: URIRef):
        """Classify *role* against the FREDDefaults role buckets; anything
        else in the FRED namespace is FRED_ROLE; otherwise None."""
        role_iri = str(role)
        for bucket, category in (
                ('passive', RoleType.PASSIVE),
                ('agentive', RoleType.AGENTIVE),
                ('conditional_agentive', RoleType.CONDITIONAL_AGENTIVE),
                ('oblique', RoleType.OBLIQUE)):
            if role_iri in FREDDefaults.ROLES[bucket]:
                return category
        if role_iri.startswith(FREDDefaults.DEFAULT_FRED_NAMESPACE):
            return RoleType.FRED_ROLE
        return None
class RoleMap:
    """Value object tying together a FRED role predicate, the ontology
    class minted for its filler, the filler's original FRED class, and
    the role category computed by RoleType."""

    def __init__(self, role: URIRef, ontology_class: URIRef, fred_class: URIRef, role_type: str):
        self._role = role
        self._ontology_class = ontology_class
        self._fred_class = fred_class
        self._role_type = role_type

    def get_role(self) -> URIRef:
        """The role predicate IRI."""
        return self._role

    def get_ontology_class(self) -> URIRef:
        """The class minted for the filler in the target namespace."""
        return self._ontology_class

    def get_fred_class(self) -> URIRef:
        """The filler's original FRED class."""
        return self._fred_class

    def get_role_type(self) -> str:
        """One of the RoleType category constants."""
        return self._role_type
class SituationDigest:
    """Accumulates the role maps of one FRED frame occurrence (situation)
    and, via formalise(), turns it into an n-ary-relation class with one
    existentially restricted object property per participant."""

    def __init__(self, source_graph: Graph, situation: URIRef, situation_type: URIRef):
        self.__source_graph = source_graph
        self.__situation = situation
        self.__situation_type = situation_type
        self.__passive_roles: List[RoleMap] = []
        self.__agentive_roles: List[RoleMap] = []
        self.__conditional_agentive_roles: List[RoleMap] = []
        self.__oblique_roles: List[RoleMap] = []
        self.__fred_roles: List[RoleMap] = []
        self.__class_label: str = None

    def get_source_graph(self) -> Graph:
        """The FRED graph the situation was read from."""
        return self.__source_graph

    def get_situation(self) -> URIRef:
        """The frame-occurrence individual."""
        return self.__situation

    def get_situation_type(self) -> URIRef:
        """The FRED class (frame) of the situation."""
        return self.__situation_type

    def add_role_map(self, participant: RoleMap, role_type: str):
        """File *participant* (a RoleMap) under its role-type bucket.
        Unknown role types are silently ignored."""
        if role_type == RoleType.AGENTIVE:
            self.__agentive_roles.append(participant)
        elif role_type == RoleType.PASSIVE:
            self.__passive_roles.append(participant)
        elif role_type == RoleType.CONDITIONAL_AGENTIVE:
            self.__conditional_agentive_roles.append(participant)
        elif role_type == RoleType.OBLIQUE:
            self.__oblique_roles.append(participant)
        elif role_type == RoleType.FRED_ROLE:
            self.__fred_roles.append(participant)

    def get_participants_of_role_type(self, role_type: str) -> List[RoleMap]:
        """Return the RoleMap list for *role_type*, or None if unknown."""
        participants = None
        if role_type == RoleType.AGENTIVE:
            participants = self.__agentive_roles
        elif role_type == RoleType.PASSIVE:
            participants = self.__passive_roles
        elif role_type == RoleType.CONDITIONAL_AGENTIVE:
            participants = self.__conditional_agentive_roles
        elif role_type == RoleType.OBLIQUE:
            participants = self.__oblique_roles
        elif role_type == RoleType.FRED_ROLE:
            participants = self.__fred_roles
        return participants

    def get_class_label(self) -> str:
        """Current local name for the class to be minted (may be None)."""
        return self.__class_label

    def set_class_label(self, class_label: str):
        """Overwrite the class local name."""
        self.__class_label = class_label

    def update_class_label(self, class_label: str):
        """Prepend *class_label* to the current name (e.g. passive
        participants prefix the gerundive frame name: 'CarBuying')."""
        if not self.__class_label:
            self.__class_label = ''
        self.__class_label = class_label + self.__class_label

    def formalise(self, namespace: str) -> Graph:
        """Emit the OWL axioms for this situation into *namespace*:
        the situation class, its (gerundified) taxonomy, and one
        property + someValuesFrom restriction per collected role map."""
        ontology = Graph()
        class_iri = URIRef(namespace+self.__class_label)
        ontology.add((class_iri, RDF.type, OWL.Class))
        ontology.add((class_iri, RDFS.label, MorphUtils.labelize_uriref(class_iri, 'en')))
        # First hop follows rdf:type (instance -> frame class), gerundified.
        ontology += MorphUtils.migrate_taxonomy(self.__source_graph, self.__situation, class_iri, True, RDF.type)
        '''
        fred_situation_type = self.__situation_type
        gerund = MorphUtils.gerundify(fred_situation_type)
        gerundive_res = URIRef("".join([namespace, gerund]))
        ontology.add((gerundive_res, RDF.type, OWL.Class))
        ontology.add((gerundive_res, RDFS.label, MorphUtils.labelize_uriref(gerundive_res)))
        ontology.add((class_iri, RDFS.subClassOf, gerundive_res))
        '''
        # Flatten all role buckets; bucket identity no longer matters here.
        role_maps: List[RoleMap] = [*self.__agentive_roles, *self.__passive_roles, *self.__conditional_agentive_roles, *self.__oblique_roles, *self.__fred_roles]
        for role_map in role_maps:
            role_predicate = role_map.get_role()
            role_actor = role_map.get_ontology_class()
            fred_class = role_map.get_fred_class()
            role_type = role_map.get_role_type()
            '''
            if fred_class == OWL.Thing:
                role_actor = URIRef(''.join([namespace, MorphUtils.get_id(role_predicate)]))
            '''
            role_actor_iri = str(role_actor)
            # FRED-specific roles keep their predicate name (<pred><Filler>);
            # thematic roles get a generic 'involves<Filler>' property.
            if role_type == RoleType.FRED_ROLE:
                role_id = str(role_actor_iri).replace(namespace, '').replace(FREDDefaults.DEFAULT_FRED_NAMESPACE, '')
                predicate_id = str(role_predicate).replace(namespace, '').replace(FREDDefaults.DEFAULT_FRED_NAMESPACE, '')
                predicate = URIRef(''.join([namespace, predicate_id, role_id]))
            else:
                predicate_id = str(role_actor_iri).replace(namespace, '').replace(FREDDefaults.DEFAULT_FRED_NAMESPACE, '')
                predicate = URIRef(''.join([namespace, 'involves', predicate_id]))
            ontology.add((predicate, RDF.type, OWL.ObjectProperty))
            ontology.add((predicate, RDFS.label, MorphUtils.labelize_uriref(predicate)))
            # Situation class subClassOf (predicate some FillerClass).
            restriction = BNode()
            ontology.add((restriction, RDF.type, OWL.Restriction))
            ontology.add((restriction, OWL.onProperty, predicate))
            ontology.add((restriction, OWL.someValuesFrom, role_actor))
            ontology.add((class_iri, RDFS.subClassOf, restriction))
            ontology.add((predicate, RDFS.domain, OWL.Thing))
            ontology.add((predicate, RDFS.range, role_actor))
            # Inverse predicate
            inverse_predicate = MorphUtils.inverse(predicate)
            ontology.add((inverse_predicate, RDF.type, OWL.ObjectProperty))
            ontology.add((inverse_predicate, RDFS.label, MorphUtils.labelize_uriref(inverse_predicate)))
            ontology.add((inverse_predicate, RDFS.domain, role_actor))
            ontology.add((inverse_predicate, RDFS.range, OWL.Thing))
            ontology.add((inverse_predicate, OWL.inverseOf, predicate))
            ontology.add((predicate, OWL.inverseOf, inverse_predicate))
            # Filler class itself plus its migrated FRED taxonomy.
            ontology.add((role_actor, RDF.type, OWL.Class))
            ontology.add((role_actor, RDFS.label, MorphUtils.labelize_uriref(role_actor)))
            ontology += MorphUtils.migrate_taxonomy(self.__source_graph, fred_class, role_actor)
        return ontology
class NAryRelationMorphism(MorphismI):
    """Detects FRED frame occurrences (instances typed by a subclass of
    dul:Event) and reifies each as an n-ary relation class via
    SituationDigest."""

    def __init__(self, ns):
        super().__init__(ns)
        # NOTE(review): unused in this class at present; gerundification is
        # done through MorphUtils.LEMMATIZER instead.
        self.__lemmatizer: WordNetLemmatizer = WordNetLemmatizer()

    def morph(self, g: Graph) -> Tuple[Graph, Graph]:
        """Return the (unmodified) source graph and the n-ary-relation
        ontology fragment built from its frame occurrences."""
        ontology = Graph()
        '''
        We first detect frame occurrences for the base case.
        Frame occurrences are detected by querying the graph with the following property path:
        ?x rdf:type/rdfs:subClassOf* dul:Event
        '''
        sparql = f'''
            PREFIX dul: <http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#>
            PREFIX owl: <{OWL._NS}>
            PREFIX xsd: <{XSD._NS}>
            SELECT ?situation ?situationtype ?role ?participant ?participanttype ?participantsameastype
            WHERE{{
                ?situation a ?situationtype ;
                    ?role ?participant .
                OPTIONAL{{
                    ?participant a ?participanttype
                    FILTER(REGEX(STR(?participanttype), '^{FREDDefaults.DEFAULT_FRED_NAMESPACE}') || ?participanttype = owl:Thing)
                }}
                OPTIONAL{{
                    {{?participant owl:sameAs/a ?participantsameastype}}
                    UNION
                    {{
                        ?situation ?role2 ?participant2
                        FILTER(datatype(?participant2) = xsd:date || datatype(?participant2) = xsd:dateTime)
                        BIND(dul:Time AS ?participantsameastype)
                    }}
                }}
                ?situationtype rdfs:subClassOf+ dul:Event
                FILTER(REGEX(STR(?situation), '^{FREDDefaults.DEFAULT_FRED_NAMESPACE}'))
                FILTER(REGEX(STR(?situationtype), '^{FREDDefaults.DEFAULT_FRED_NAMESPACE}'))
                FILTER(REGEX(STR(?role), '^http://www.ontologydesignpatterns.org/ont/') )
            }}
        '''
        resultset = g.query(sparql)
        #paths = evalPath(g, (None, RDF.type/(RDFS.subClassOf*OneOrMore), URIRef('http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#Event')))
        #for path in paths:
        # One digest per situation individual, keyed by its IRI.
        situations_registry: Dict[URIRef, SituationDigest] = dict()
        for row in resultset:
            situation = row.situation
            situation_type = row.situationtype
            role = row.role
            role_type = RoleType.get_role_type(role)
            # Resolve the participant's class: prefer a concrete FRED type;
            # owl:Thing alone is too weak, so fall back to the sameAs alias
            # type or, failing that, the role IRI itself.
            if row.participanttype == OWL.Thing:
                if row.participantsameastype:
                    participant_type = row.participantsameastype
                else:
                    participant_type = role
            else:
                if row.participanttype:
                    participant_type = row.participanttype
                elif row.participantsameastype:
                    participant_type = row.participantsameastype
                else:
                    participant_type = None
            if participant_type and role_type:
                if situation in situations_registry:
                    situation_digest = situations_registry[situation]
                else:
                    # First encounter: seed the digest with the gerundified
                    # frame name (e.g. 'Buy' -> 'Buying').
                    situation_digest = SituationDigest(g, situation, situation_type)
                    situations_registry.update({situation: situation_digest})
                    situation_digest.set_class_label(MorphUtils.gerundify(situation_type))
                local_class_id = MorphUtils.get_id(participant_type)
                ontology_class = URIRef(self._ns + local_class_id)
                role_map = RoleMap(role, ontology_class, participant_type, role_type)
                situation_digest.add_role_map(role_map, role_type)
                # Passive participants prefix the class name ('CarBuying').
                if role_type == RoleType.PASSIVE:
                    situation_digest.update_class_label(local_class_id)
        for situation, digest in situations_registry.items():
            ontology += digest.formalise(self._ns)
        return g, ontology
class Frodo:
    """Drives ontology generation: runs a competency question through FRED
    and pipes the resulting graph through a sequence of morphisms."""

    def __init__(self, namespace: str, fred_uri: str, fred_key: str, morphisms: Tuple[MorphismI] = None):
        self.__g: Graph = None
        self.__ns: Namespace = Namespace(namespace)
        self.__fred_uri = fred_uri
        self.__fred_key = fred_key
        default_pipeline = (
            TaxonomyMorphism(self.__ns),
            BinaryRelationMorphism(self.__ns),
            NAryRelationMorphism(self.__ns),
        )
        self.__morphisms: Tuple[MorphismI] = morphisms if morphisms else default_pipeline

    def get_namespace(self) -> str:
        return self.__ns

    def set_namespace(self, namespace: str):
        self.__ns = namespace

    def generate(self, cq: str, ontology_id: str = None) -> Graph:
        """Translate the competency question *cq* into an OWL ontology."""
        if not ontology_id:
            ontology_id = self.__ns
        fredclient = FREDClient(self.__fred_uri, key=self.__fred_key)
        self.__g = fredclient.execute_request(cq, FREDParameters(semantic_subgraph=True))
        ontology = Graph()
        ontology.add((URIRef(ontology_id), RDF.type, OWL.Ontology))
        ontology.bind("owl", Namespace('http://www.w3.org/2002/07/owl#'))
        ontology.bind("rdf", Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#'))
        ontology.bind("rdfs", Namespace('http://www.w3.org/2000/01/rdf-schema#'))
        ontology.bind("", Namespace(self.__ns))
        # Each morphism may rewrite the working graph and emits new axioms.
        for morphism in self.__morphisms:
            self.__g, new_axioms = morphism.morph(self.__g)
            ontology += new_axioms
        # Drop degenerate reflexive subclass axioms (C subClassOf C).
        reflexive = [(s, RDFS.subClassOf, o)
                     for s, _, o in ontology.triples((None, RDFS.subClassOf, None))
                     if s == o]
        for triple in reflexive:
            ontology.remove(triple)
        return ontology
| 22,735 | 37.601019 | 161 | py |
frodo | frodo-main/frodo/__init__.py | from frodo.frodo import *
| 26 | 12.5 | 25 | py |
minkasi | minkasi-master/setup.py | from setuptools import Extension, setup
import ctypes
import subprocess
import os
# Build the native libraries minkasi wraps via ctypes if they are not
# already loadable, then hand off to setuptools.  The original duplicated
# the try/except build logic for both libraries and bound the loaded
# handle to an unused local; both issues are fixed by this helper.
def _ensure_native_lib(soname, make_target):
    """Try to dlopen *soname*; if it is missing, build it with make."""
    try:
        ctypes.cdll.LoadLibrary(soname)
    except OSError:
        # The Makefile reads the install prefix from the environment.
        os.environ["prefix"] = "minkasi"
        subprocess.check_call(["make", "-e", make_target])

_ensure_native_lib("libminkasi.so", "libminkasi")
_ensure_native_lib("libmkfftw.so", "libmkfftw")

setup(
    name='minkasi',
    version='1.0.0',
    install_requires=[
        'requests',
        'importlib-metadata',
        'numpy',
        'astropy',
        'scipy'
    ],
    packages=['minkasi'],
    package_data={"minkasi": ["*.so"]},
)
| 642 | 20.433333 | 55 | py |
minkasi | minkasi-master/examples/minkasi_mpi_example.py | import numpy
from matplotlib import pyplot as plt
import minkasi,pyfftw
import time
import glob
# BUG FIX: in Python 3 reload() is no longer a builtin; it must be imported
# from importlib (the other example scripts in this directory already do so).
from importlib import reload
reload(minkasi)

plt.ion()

# Find TOD files we want to map.
dir = '../data/m0717_raw/'
tod_names = glob.glob(dir + '*.fits')

# If running MPI, split files round-robin between processes.  minkasi falls
# back to myrank=0 / nproc=1 without MPI, so this also runs serially.
tod_names = tod_names[minkasi.myrank::minkasi.nproc]

todvec = minkasi.TodVec()
# Loop over each file and read it.
for fname in tod_names:
    t1 = time.time()
    dat = minkasi.read_tod_from_fits(fname)
    t2 = time.time()
    minkasi.truncate_tod(dat)    # chop samples so the length is FFT-friendly
    minkasi.downsample_tod(dat)  # the data may be sampled faster than needed
    minkasi.truncate_tod(dat)    # the length changed, so re-truncate
    # Guess a common mode plus (assumed) linear detector drifts/offsets;
    # drifts/offsets are removed (important for mode finding), CM is *not*.
    dd = minkasi.fit_cm_plus_poly(dat['dat_calib'])
    dat['dat_calib'] = dd
    t3 = time.time()
    tod = minkasi.Tod(dat)
    todvec.add_tod(tod)
    print('took ', t2 - t1, ' ', t3 - t2, ' seconds to read and downsample file ', fname)

# Template map with the desired pixel size and limits that cover the data.
# todvec.lims() is MPI-aware and returns global, not per-process, limits.
lims = todvec.lims()
pixsize = 3.0 / 3600 * numpy.pi / 180
map = minkasi.SkyMap(lims, pixsize)

# Cache each TOD's pixellization and build a noise model: rotate the data
# into SVD space, then smooth the power spectrum of each mode.
for tod in todvec.tods:
    ipix = map.get_pix(tod)
    tod.info['ipix'] = ipix
    tod.set_noise_smoothed_svd()

# Hit-count map — used as a preconditioner, which helps small-scale
# convergence quite a bit.
hits = minkasi.make_hits(todvec, map)

# A mapset can hold many components; here just the single skymap.
mapset = minkasi.Mapset()
mapset.add_map(map)

# Make A^T N^-1 d.  make_rhs lives on the TOD vector (TODs know what to do
# with maps) and is MPI-aware.
rhs = mapset.copy()
todvec.make_rhs(rhs)

# Starting guess: zero (substitute a better one if you have it).
x0 = rhs.copy()
x0.clear()

# Preconditioner is 1/hit-count.
precon = mapset.copy()
tmp = hits.map.copy()
ii = tmp > 0
tmp[ii] = 1.0 / tmp[ii]
precon.maps[0].map[:] = tmp[:]

# Run PCG!
mapset_out = minkasi.run_pcg(rhs, x0, todvec, precon, maxiter=50)
if minkasi.myrank == 0:
    mapset_out.maps[0].write('first_map_precon_mpi.fits')  # write out the map as FITS
else:
    print('not writing map on process ', minkasi.myrank)

# To run another round of PCG starting from the previous solution,
# replace x0 with mapset_out:
# mapset_out2=minkasi.run_pcg(rhs,mapset_out,todvec,mapset,maxiter=50)
# mapset_out2.maps[0].write('second_map.fits')
| 3,708 | 34.32381 | 95 | py |
minkasi | minkasi-master/examples/tsBowl_fitter.py | import minkasi
import numpy as np
from matplotlib import pyplot as plt
import time
# Single-TOD bowl-fit demo: read one TOD, build a tsBowl model for it,
# solve for the bowl polynomial with PCG, and overplot the result.
fname = '/scratch/r/rbond/jorlo/MS0735/TS_EaCMS0f0_51_5_Oct_2021/Signal_TOD-AGBT19A_092_08-s26.fits'

dat = minkasi.read_tod_from_fits(fname)
minkasi.truncate_tod(dat)

# Guess a common mode and (assumed) linear detector drifts/offsets;
# drifts/offsets are removed (important for mode finding), CM is *not*.
dd, pred2, cm = minkasi.fit_cm_plus_poly(dat["dat_calib"], cm_ord=3, full_out=True)
dat["dat_calib"] = dd
dat["pred2"] = pred2
dat["cm"] = cm

flatten = True
if flatten:
    dat['dat_calib'] -= pred2

tod = minkasi.Tod(dat)
tod.set_apix()

todvec = minkasi.TodVec()
todvec.add_tod(tod)
for tod in todvec.tods:
    tod.set_noise(minkasi.NoiseSmoothedSVD)

tsBowl = minkasi.tsBowl(tod)

mapset = minkasi.Mapset()
mapset.add_map(tsBowl)

rhs = mapset.copy()
todvec.make_rhs(rhs)
x0 = rhs.copy()
x0.clear()

t_start = time.time()
mapset_out = minkasi.run_pcg(rhs, x0, todvec, maxiter=50)
t_stop = time.time()
print('Took {} sec to fit 1 tod'.format(t_stop - t_start))
print(mapset_out.maps[0].params[0])

# Overplot the fitted bowl on the data for the first detector.
plt.plot(tod.info['apix'][0], tod.info['dat_calib'][0])
plt.plot(tod.info['apix'][0], np.dot(mapset_out.maps[0].params[0], mapset_out.maps[0].vecs[0].T))
plt.ylim(-0.05, 0.05)
plt.xlabel('apix')
if flatten:
    plt.ylabel('dat_calib - pred2')
else:
    plt.ylabel('dat_calib')
plt.show()
| 1,384 | 20.984127 | 100 | py |
minkasi | minkasi-master/examples/zw3146_tsBowl.py | #This is a template script to show how to fit multi-component models
#directly to timestreams. The initial part (where the TODs, noise model etc.)
#are set up is the same as general mapping scripts, although we don't
#need to bother with setting a map/pixellization in general (although if
#your timestream model used a map you might). This script works under
#MPI as well.
import numpy as np
from matplotlib import pyplot as plt
import minkasi
import time
import glob
from importlib import reload
reload(minkasi)

plt.ion()

# Find the TOD files we want to map.
outroot = 'maps/zw3146/zw3146'
dir = '../data/Zw3146/'
tod_names = glob.glob(dir + 'Sig*.fits')
tod_names.sort()  # every MPI rank must agree on the file ordering
tod_names = tod_names[minkasi.myrank::minkasi.nproc]

todvec = minkasi.TodVec()
# Loop over each file and read it.
for fname in tod_names:
    t1 = time.time()
    dat = minkasi.read_tod_from_fits(fname)
    t2 = time.time()
    minkasi.truncate_tod(dat)  # chop samples so the length is FFT-friendly
    # Remove per-detector drifts/offsets; the common mode stays in.
    dd = minkasi.fit_cm_plus_poly(dat['dat_calib'])
    dat['dat_calib'] = dd
    t3 = time.time()
    tod = minkasi.Tod(dat)
    todvec.add_tod(tod)
    print('took ', t2 - t1, ' ', t3 - t2, ' seconds to read and downsample file ', fname)

minkasi.barrier()

lims = todvec.lims()
# BUG FIX: this script only imports numpy as np, so the original
# 'numpy.pi' raised a NameError.
pixsize = 2.0 / 3600 * np.pi / 180
map = minkasi.SkyMap(lims, pixsize)

for tod in todvec.tods:
    ipix = map.get_pix(tod)
    tod.info['ipix'] = ipix
    tod.set_noise_smoothed_svd()
    # NOTE(review): set_noise below presumably supersedes the call above —
    # the sibling fit_zw3146 script comments the first call out; confirm.
    tod.set_noise(minkasi.NoiseSmoothedSVD)

tsVec = minkasi.tsModel(todvec=todvec, modelclass=minkasi.tsBowl)

# BUG FIX: mapset was used without ever being constructed.
mapset = minkasi.Mapset()
# PCG will simultaneously make the sky map from the TODs and fit the
# bowl polynomials held in tsVec.
mapset.add_map(map)
mapset.add_map(tsVec)

hits = minkasi.make_hits(todvec, map)
rhs = mapset.copy()
todvec.make_rhs(rhs)
x0 = rhs.copy()
x0.clear()

# Preconditioners: 1/hit-count for the sky map, 1/todlength for the bowls.
precon = mapset.copy()
tmp = hits.map.copy()
ii = tmp > 0
tmp[ii] = 1.0 / tmp[ii]
precon.maps[0].map[:] = tmp[:]
if len(mapset.maps) > 1:
    for key in precon.maps[1].data.keys():
        temp = np.ones(precon.maps[1].data[key].params.shape)
        temp /= precon.maps[1].data[key].vecs.shape[1]
        precon.maps[1].data[key].params = temp

mapset_out = minkasi.run_pcg(rhs, x0, todvec, precon, maxiter=50)
if minkasi.myrank == 0:
    if len(mapset.maps) == 1:
        mapset_out.maps[0].write('/scratch/r/rbond/jorlo/noBowl_map_precon_mpi_py3.fits')
    else:
        mapset_out.maps[0].write('/scratch/r/rbond/jorlo/tsBowl_map_precon_mpi_py3.fits')
else:
    print('not writing map on process ', minkasi.myrank)

# BUG FIX: the original called minkasi.MPI.Finalize() here, but MPI calls
# (comm.barrier and the timestream fit below) still follow; using MPI after
# Finalize is an error, so the Finalize was removed.

# Initial guess for the nonlinear timestream fit.
d2r = np.pi / 180
sig = 9 / 2.35 / 3600 * d2r
theta_0 = 40 / 3600 * d2r
beta_pars = np.asarray([155.91355 * d2r, 4.1877 * d2r, theta_0, 0.7, -8.2e-4])
src1_pars = np.asarray([155.9374 * d2r, 4.1775 * d2r, 3.1e-5, 9.15e-4])
src2_pars = np.asarray([155.90447 * d2r, 4.1516 * d2r, 2.6e-5, 5.1e-4])
# Combine parameters into a single vector; the fitter needs the per-function
# parameter counts as well.
pars = np.hstack([beta_pars, src1_pars, src2_pars])
npar = np.hstack([len(beta_pars), len(src1_pars), len(src2_pars)])
# Each function returns the model timestreams and their parameter derivatives.
funs = [minkasi.derivs_from_isobeta_c, minkasi.derivs_from_gauss_c, minkasi.derivs_from_gauss_c]
# Selected parameters can be kept fixed at their input values.
to_fit = np.ones(len(pars), dtype='bool')
to_fit[3] = False  # Let's keep beta pegged to 0.7

t1 = time.time()
pars_fit, chisq, curve, errs = minkasi.fit_timestreams_with_derivs_manyfun(funs, pars, npar, todvec, to_fit)
t2 = time.time()
if minkasi.myrank == 0:
    print('No Sub: ')
    print('took ', t2 - t1, ' seconds to fit timestreams')
    for i in range(len(pars_fit)):
        print('parameter ', i, ' is ', pars_fit[i], ' with error ', errs[i])
minkasi.comm.barrier()

# Subtract the fitted bowl model from each TOD.
# BUG FIX: todvec.tods is a list attribute, not a callable; the subtraction
# also used the stale loop variable 'fname' and indexed mapset_out directly
# instead of mapset_out.maps.
for tod in todvec.tods:
    todname = tod.info['fname']
    tod.info['dat_calib'] -= np.dot(mapset_out.maps[1].data[todname].params,
                                    mapset_out.maps[1].data[todname].vecs.T)
minkasi | minkasi-master/examples/fit_zw3146_multi_component.py | #This is a template script to show how to fit multi-component models
#directly to timestreams. The initial part (where the TODs, noise model etc.)
#are set up is the same as general mapping scripts, although we don't
#need to bother with setting a map/pixellization in general (although if
#your timestream model used a map you might). This script works under
#MPI as well.
import numpy
import numpy as np
from matplotlib import pyplot as plt
import minkasi
import time
import glob
from importlib import reload
reload(minkasi)

plt.ion()

# Locate the TODs to fit.
outroot = 'maps/zw3146/zw3146'
dir = '../data/Zw3146/'
tod_names = glob.glob(dir + 'Sig*.fits')
tod_names.sort()  # every MPI rank must agree on the file ordering
# Split the files round-robin across ranks; minkasi falls back to
# myrank=0 / nproc=1 without MPI, so this also runs serially.
tod_names = tod_names[minkasi.myrank::minkasi.nproc]

todvec = minkasi.TodVec()
for fname in tod_names:
    t_read0 = time.time()
    dat = minkasi.read_tod_from_fits(fname)
    t_read1 = time.time()
    # Chop samples from the end so the length is FFT-friendly.
    minkasi.truncate_tod(dat)
    # Remove per-detector drifts/offsets (important for mode finding);
    # the common mode itself is left in the data.
    dat['dat_calib'] = minkasi.fit_cm_plus_poly(dat['dat_calib'])
    t_done = time.time()
    todvec.add_tod(minkasi.Tod(dat))
    print('took ', t_read1 - t_read0, ' ', t_done - t_read1,
          ' seconds to read and downsample file ', fname)

# Global (MPI-aware) limits and a nominal pixel size, kept for reference
# even though a timestream fit needs no pixellization.
lims = todvec.lims()
pixsize = 2.0 / 3600 * numpy.pi / 180

# Noise model: rotate each TOD into SVD space and smooth the power
# spectrum of every mode.
for tod in todvec.tods:
    tod.set_noise(minkasi.NoiseSmoothedSVD)

# Initial guess for the nonlinear model (from a map / earlier fits);
# the better the guess, the faster the convergence.
d2r = np.pi / 180
sig = 9 / 2.35 / 3600 * d2r
theta_0 = 40 / 3600 * d2r
beta_pars = np.asarray([155.91355 * d2r, 4.1877 * d2r, theta_0, 0.7, -8.2e-4])
src1_pars = np.asarray([155.9374 * d2r, 4.1775 * d2r, 3.1e-5, 9.15e-4])
src2_pars = np.asarray([155.90447 * d2r, 4.1516 * d2r, 2.6e-5, 5.1e-4])
# The fitter wants one flat parameter vector plus the per-function counts.
pars = np.hstack([beta_pars, src1_pars, src2_pars])
npar = np.hstack([len(beta_pars), len(src1_pars), len(src2_pars)])
# Each function returns the model timestreams and their parameter derivatives.
funs = [minkasi.derivs_from_isobeta_c, minkasi.derivs_from_gauss_c, minkasi.derivs_from_gauss_c]
# Selected parameters can stay fixed at their input values.
to_fit = np.ones(len(pars), dtype='bool')
to_fit[3] = False  # keep beta pegged to 0.7

t_fit0 = time.time()
pars_fit, chisq, curve, errs = minkasi.fit_timestreams_with_derivs_manyfun(funs, pars, npar, todvec, to_fit)
t_fit1 = time.time()
if minkasi.myrank == 0:
    print('took ', t_fit1 - t_fit0, ' seconds to fit timestreams')
    for i in range(len(pars_fit)):
        print('parameter ', i, ' is ', pars_fit[i], ' with error ', errs[i])
minkasi.comm.barrier()
| 4,311 | 38.2 | 114 | py |
minkasi | minkasi-master/examples/minkasi_map_moo_python3.py | import numpy
import numpy as np
from matplotlib import pyplot as plt
import minkasi
import time
import glob
from importlib import reload
reload(minkasi)
plt.ion()
def smooth_map(map, npix=3):
    """Smooth a 2-D map with a circular Gaussian of width *npix* pixels.

    The convolution is done in Fourier space (periodic boundary conditions);
    the kernel is normalized by its sum so the map mean is preserved.

    BUG FIX: the output shape is now pinned with ``s=map.shape`` — without
    it, np.fft.irfft2 assumes an even last axis and silently returned a map
    one column narrower whenever the input width was odd.
    """
    n, m = map.shape
    # Squared distance from pixel (0,0), with FFT wraparound.
    v1 = np.fft.fftfreq(n) * n
    v2 = np.fft.fftfreq(m) * m
    rmat = np.outer(v1, np.ones(m)) ** 2 + np.outer(np.ones(n), v2) ** 2
    kernel = np.exp(-0.5 * rmat / npix ** 2)
    mapft = np.fft.rfft2(map)
    kft = np.fft.rfft2(kernel)
    # kft[0,0] is the kernel sum; dividing by it keeps the DC level unchanged.
    return np.fft.irfft2(mapft * kft / kft[0, 0], s=map.shape)
#assert(1==0)

# Locate the TODs to map.
dir = '/scratch/s/sievers/skh/tods/MOO1142/TS_EaCMS0f0_51_16_Feb_2022/'
tod_names = glob.glob(dir + 'Sig*.fits')
# Round-robin the files over MPI ranks; minkasi degrades gracefully to
# rank 0 of 1 when MPI is unavailable.
tod_names = tod_names[minkasi.myrank::minkasi.nproc]

todvec = minkasi.TodVec()
for fname in tod_names:
    t_read0 = time.time()
    dat = minkasi.read_tod_from_fits(fname)
    t_read1 = time.time()
    minkasi.truncate_tod(dat)    # FFT-friendly length
    minkasi.downsample_tod(dat)  # drop excess sample rate if present
    minkasi.truncate_tod(dat)    # re-truncate after the length changed
    # Remove per-detector drifts/offsets; the common mode stays in.
    dat['dat_calib'] = minkasi.fit_cm_plus_poly(dat['dat_calib'])
    t_done = time.time()
    todvec.add_tod(minkasi.Tod(dat))
    print('took ', t_read1 - t_read0, ' ', t_done - t_read1,
          ' seconds to read and downsample file ', fname)

# Template map with pixel size/limits covering all the data
# (todvec.lims() is MPI-aware and returns global limits).
lims = todvec.lims()
pixsize = 2.0 / 3600 * numpy.pi / 180
map = minkasi.SkyMap(lims, pixsize)

# Cache each TOD's pixellization and build the SVD-smoothed noise model.
for tod in todvec.tods:
    tod.info['ipix'] = map.get_pix(tod)
    tod.set_noise(minkasi.NoiseSmoothedSVD)

'''
inds=[0,1,-1]
for ind in inds:
tod=todvec.tods[ind]
plt.clf();
dd=tod.info['dat_calib'].copy()
tvec=np.arange(dd.shape[1])*tod.info['dt']
plt.plot(tvec,dd.T)
plt.xlabel("Time (s)")
plt.ylabel("Detector Temperature (K)")
tt=tod.info['fname']
mystr=tod.info['fname']
i1=mystr.find('-');i2=mystr.find('.',3);tag=mystr[i1+1:i2]
plt.savefig('all_detectors_' + tag +'.png')
plt.clf();
dat_filt=tod.apply_noise(tod.info['dat_calib'])
plt.clf();
plt.plot(tvec,dat_filt[::10,:].T/np.sqrt(tod.info['dt'])/1000)
plt.savefig('noise_filtered_dets_'+tag+'.png')
#assert(1==0)
'''

# Hit-count map, used as the preconditioner (helps small-scale convergence).
hits = minkasi.make_hits(todvec, map)

# A mapset can hold many components; here just the single skymap.
mapset = minkasi.Mapset()
mapset.add_map(map)

# RHS = A^T N^-1 d (make_rhs is MPI-aware).
rhs = mapset.copy()
todvec.make_rhs(rhs)

# Start PCG from zero.
x0 = rhs.copy()
x0.clear()

# Preconditioner: 1/hits where hit, 0 elsewhere.
precon = mapset.copy()
inv_hits = hits.map.copy()
hit_pix = inv_hits > 0
inv_hits[hit_pix] = 1.0 / inv_hits[hit_pix]
precon.maps[0].map[:] = inv_hits[:]

# Run PCG!
plot_info = {}
plot_info['vmin'] = -6e-4
plot_info['vmax'] = 6e-4
plot_iters = [1, 2, 3, 5, 10, 15, 20, 25, 30, 35, 40, 45, 49]
mapset_out = minkasi.run_pcg(rhs, x0, todvec, precon, maxiter=50)  # ,plot_iters=plot_iters,plot_info=plot_info

if minkasi.myrank == 0:
    print(type(mapset_out.maps[0]))
    mapset_out.maps[0].write('/scratch/r/rbond/jorlo/first_map_precon_mpi_py3.fits')  # write the map as FITS
else:
    print('not writing map on process ', minkasi.myrank)

if minkasi.nproc > 1:
    minkasi.MPI.Finalize()

# To run another round of PCG starting from the previous solution,
# replace x0 with mapset_out:
# mapset_out2=minkasi.run_pcg(rhs,mapset_out,todvec,mapset,maxiter=50)
# mapset_out2.maps[0].write('second_map.fits')
| 5,186 | 30.628049 | 122 | py |
minkasi | minkasi-master/examples/tsBowl_map_maker.py | import minkasi
import numpy as np
from matplotlib import pyplot as plt
import glob
import time
import minkasi_jax.presets_by_source as pbs
import os
from astropy.coordinates import Angle
from astropy import units as u
# Data location and blacklist handling.
dir = '/scratch/r/rbond/jorlo/MS0735//TS_EaCMS0f0_51_5_Oct_2021/'
tod_names = glob.glob(dir + 'Sig*.fits')
bad_tod, addtag = pbs.get_bad_tods("MS0735", ndo=False, odo=False)
#bad_tod.append('Signal_TOD-AGBT21A_123_03-s20.fits')
tod_names = minkasi.cut_blacklist(tod_names, bad_tod)

print("nproc: ", minkasi.nproc)
tod_names = tod_names[minkasi.myrank::minkasi.nproc]

todvec = minkasi.TodVec()
flatten = True

# Read every TOD.
for i, fname in enumerate(tod_names):
    if fname == '/scratch/r/rbond/jorlo/MS0735//TS_EaCMS0f0_51_5_Oct_2021/Signal_TOD-AGBT21A_123_03-s20.fits':
        continue
    t_read0 = time.time()
    dat = minkasi.read_tod_from_fits(fname)
    t_read1 = time.time()
    minkasi.truncate_tod(dat)    # FFT-friendly length
    minkasi.downsample_tod(dat)  # drop excess sample rate if present
    minkasi.truncate_tod(dat)    # re-truncate after the length changed
    # Common mode plus cubic drifts; drifts/offsets are removed, CM stays.
    dd, pred2, cm = minkasi.fit_cm_plus_poly(dat["dat_calib"], cm_ord=3, full_out=True)
    dat['dat_calib'] = dd
    if flatten:
        dat['dat_calib'] -= pred2
    t_done = time.time()
    tod = minkasi.Tod(dat)
    todvec.add_tod(tod)
    print('took ', t_read1 - t_read0, ' ', t_done - t_read1,
          ' seconds to read and downsample file ', fname)

minkasi.barrier()

lims = todvec.lims()
pixsize = 2.0 / 3600 * np.pi / 180
map = minkasi.SkyMap(lims, pixsize)
mapset = minkasi.Mapset()

for i, tod in enumerate(todvec.tods):
    tod.info['ipix'] = map.get_pix(tod)
    try:
        tod.set_noise(minkasi.NoiseSmoothedSVD)
    except:
        print(i, tod.info['fname'])

tsVec = minkasi.tsModel(todvec=todvec, modelclass=minkasi.tsBowl)

# PCG will simultaneously make the sky map and fit the bowl polynomials.
mapset.add_map(map)
mapset.add_map(tsVec)

hits = minkasi.make_hits(todvec, map)
rhs = mapset.copy()
todvec.make_rhs(rhs)
x0 = rhs.copy()
x0.clear()

# Preconditioners: 1/hits for the sky map, 1/todlength for the bowls.
precon = mapset.copy()
inv_hits = hits.map.copy()
hit_pix = inv_hits > 0
inv_hits[hit_pix] = 1.0 / inv_hits[hit_pix]
precon.maps[0].map[:] = inv_hits[:]
if len(mapset.maps) > 1:
    for key in precon.maps[1].data.keys():
        temp = np.ones(precon.maps[1].data[key].params.shape)
        temp /= precon.maps[1].data[key].vecs.shape[1]
        precon.maps[1].data[key].params = temp

# Keep an untouched copy of the TODs for later timestream fits.
todvec_copy = minkasi.TodVec()
for tod in todvec.tods:
    todvec_copy.add_tod(tod.copy())

# Run PCG!
plot_info = {}
plot_info['vmin'] = -6e-4
plot_info['vmax'] = 6e-4
plot_iters = [1, 2, 3, 5, 10, 15, 20, 25, 30, 35, 40, 45, 49]
mapset_out = minkasi.run_pcg(rhs, x0, todvec, precon, maxiter=200)

if minkasi.myrank == 0:
    if len(mapset.maps) == 1:
        mapset_out.maps[0].write('/scratch/r/rbond/jorlo/{}_noBowl_map_precon_mpi_py3.fits'.format(flatten))
    else:
        mapset_out.maps[0].write('/scratch/r/rbond/jorlo/{}_tsBowl_map_precon_mpi_py3.fits'.format(flatten))
else:
    print('not writing map on process ', minkasi.myrank)

# Cluster (isobeta) and point-source model guesses.
d2r = np.pi / 180
sig = 9 / 2.35 / 3600 * d2r
theta0 = np.deg2rad(97)
x0 = Angle('07 41 44.5 hours').to(u.radian).value
y0 = Angle('74:14:38.7 degrees').to(u.radian).value
beta_pars = np.asarray([x0, y0, theta0, 0.98, -8.2e-1])
x0_src = Angle('07 41 44.5 hours').to(u.radian).value
y0_src = Angle('74:14:38.7 degrees').to(u.radian).value
src1_pars = np.asarray([x0_src, y0_src, 1.37e-5, 1.7e-4])
# One flat parameter vector plus the per-function counts for the fitter.
pars = np.hstack([beta_pars, src1_pars])
npar = np.hstack([len(beta_pars), len(src1_pars)])
# Each function returns the model timestreams and their parameter derivatives.
funs = [minkasi.derivs_from_isobeta_c, minkasi.derivs_from_gauss_c]
# Selected parameters can stay fixed at their input values.
to_fit = np.ones(len(pars), dtype='bool')
to_fit[[0, 1, 2, 5, 6]] = False

'''
t1=time.time()
pars_fit,chisq,curve,errs=minkasi.fit_timestreams_with_derivs_manyfun(funs,pars,npar,todvec_copy,to_fit)
t2=time.time()
if minkasi.myrank==0:
print('No Sub: ')
print('took ',t2-t1,' seconds to fit timestreams')
for i in range(len(pars_fit)):
print('parameter ',i,' is ',pars_fit[i],' with error ',errs[i])
'''

minkasi.comm.barrier()

# NOTE(review): the bowl model is subtracted only on rank 0, yet every rank
# participates in the second map-making below — confirm this is intended.
if minkasi.myrank == 0:
    for tod in todvec.tods:
        todname = tod.info['fname']
        temp = minkasi.map2todbowl(mapset_out.maps[1].data[todname].vecs,
                                   mapset_out.maps[1].data[todname].params)
        tod.info['dat_calib'] -= temp

'''
t1=time.time()
pars_fit,chisq,curve,errs=minkasi.fit_timestreams_with_derivs_manyfun(funs,pars,npar,todvec,to_fit)
t2=time.time()
if minkasi.myrank==0:
print('Sub: ')
print('took ',t2-t1,' seconds to fit timestreams')
for i in range(len(pars_fit)):
print('parameter ',i,' is ',pars_fit[i],' with error ',errs[i])
'''

# Second pass: map the bowl-subtracted TODs.
lims = todvec.lims()
pixsize = 2.0 / 3600 * np.pi / 180
map = minkasi.SkyMap(lims, pixsize)
mapset = minkasi.Mapset()
mapset.add_map(map)
hits = minkasi.make_hits(todvec, map)
rhs = mapset.copy()
todvec.make_rhs(rhs)
x0 = rhs.copy()
x0.clear()
# Preconditioner: 1/hits where hit, 0 elsewhere.
precon = mapset.copy()
inv_hits = hits.map.copy()
hit_pix = inv_hits > 0
inv_hits[hit_pix] = 1.0 / inv_hits[hit_pix]
precon.maps[0].map[:] = inv_hits[:]
mapset_out = minkasi.run_pcg(rhs, x0, todvec, precon, maxiter=50)
if minkasi.myrank == 0:
    mapset_out.maps[0].write('/scratch/r/rbond/jorlo/{}_sub_tsBowl_map_precon_mpi_py3.fits'.format(flatten))
else:
    print('not writing map on process ', minkasi.myrank)
| 6,463 | 28.788018 | 146 | py |
minkasi | minkasi-master/minkasi/code_scrapyard.py | def __run_pcg_old(b,x0,tods,mapset,precon):
Ax=mapset.dot(x0)
r=b-Ax
z=precon*r
p=z.copy()
k=0
zr=r.dot(z)
x=x0.copy()
for iter in range(25):
print(iter,zr)
Ap=mapset.dot(p)
pAp=p.dot(Ap)
alpha=zr/pAp
x_new=x+p*alpha
r_new=r-Ap*alpha
z_new=precon*r_new
zr_new=r_new.dot(z_new)
beta=zr_new/zr
p_new=z_new+p*beta
p=p_new
z=z_new
r=r_new
zr=zr_new
x=x_new
return x
def run_pcg_wprior_old(b, x0, tods, prior, precon=None, maxiter=25):
    """Deprecated PCG solver with a prior term on the first map.

    Solves (A + P) x = b, where A is applied by ``tods.dot`` and the prior P
    by ``prior.apply_prior`` on the first map of the mapset.  ``precon`` is
    applied with ``*`` when given.  Progress is printed on MPI rank 0 only.

    Changes from the original: the unused local ``k`` was removed, and the
    bare ``except:`` clauses (which would also swallow KeyboardInterrupt and
    SystemExit) were narrowed to ``except Exception`` — they exist only as a
    best-effort fallback for mapsets lacking copy()/axpy().
    """
    t1 = time.time()
    Ax = tods.dot(x0)
    flub = prior.apply_prior(x0.maps[0].map)
    print('means of flub and Ax are ', np.mean(np.abs(Ax.maps[0].map)), np.mean(np.abs(flub)))
    Ax.maps[0].map = Ax.maps[0].map + prior.apply_prior(x0.maps[0].map)
    try:
        r = b.copy()
        r.axpy(Ax, -1)
    except Exception:
        r = b - Ax
    if precon is not None:
        z = precon * r
    else:
        z = r.copy()
    p = z.copy()
    zr = r.dot(z)
    x = x0.copy()
    t2 = time.time()
    for iter in range(maxiter):
        if myrank == 0:
            if iter > 0:
                print(iter, zr, alpha, t2 - t1, t3 - t2, t3 - t1)
            else:
                print(iter, zr, t2 - t1)
        t1 = time.time()
        Ap = tods.dot(p)
        # Add the prior's contribution to the matrix-vector product.
        fwee = prior.apply_prior(p.maps[0].map)
        Ap.maps[0].map = Ap.maps[0].map + fwee
        t2 = time.time()
        pAp = p.dot(Ap)
        alpha = zr / pAp
        try:
            x_new = x.copy()
            x_new.axpy(p, alpha)
        except Exception:
            x_new = x + p * alpha
        try:
            r_new = r.copy()
            r_new.axpy(Ap, -alpha)
        except Exception:
            r_new = r - Ap * alpha
        if precon is not None:
            z_new = precon * r_new
        else:
            z_new = r_new.copy()
        zr_new = r_new.dot(z_new)
        beta = zr_new / zr
        try:
            p_new = z_new.copy()
            p_new.axpy(p, beta)
        except Exception:
            p_new = z_new + p * beta
        p = p_new
        z = z_new
        r = r_new
        zr = zr_new
        x = x_new
        t3 = time.time()
    return x
class tsStripes_old(tsGeneric):
    """Deprecated per-segment ("stripe") timestream model.

    Each detector's data are divided into segments of ``seg_len`` samples.
    With ``do_slope=True`` neighbouring segment amplitudes are connected by
    straight lines (linear interpolation between node parameters); otherwise
    every segment gets a simple horizontal offset.
    """

    def __init__(self, tod, seg_len=100, do_slope=True):
        dims = tod.get_data_dims()
        nseg = dims[1] // seg_len
        if nseg * seg_len < dims[1]:
            nseg = nseg + 1
        # Connecting segments with lines needs one extra node parameter.
        if do_slope:
            nseg = nseg + 1
        self.nseg = nseg
        self.seg_len = seg_len
        self.params = np.zeros([dims[0], self.nseg])
        self.do_slope = do_slope

    def tod2map(self, tod, dat=None, do_add=True, do_omp=False):
        """Accumulate data into the stripe parameters (transpose of map2tod)."""
        if dat is None:
            dat = tod.get_data()
        accum = np.zeros(self.params.shape)
        if self.do_slope:
            # Interpolation weights across one full segment.
            ramp = np.arange(self.seg_len) / self.seg_len
            wts = np.vstack([1 - ramp, ramp])
            nseg = dat.shape[1] // self.seg_len
            imax = nseg
            if nseg * self.seg_len < dat.shape[1]:
                have_extra = True
                nextra = dat.shape[1] - imax * self.seg_len
            else:
                have_extra = False
                nseg = nseg - 1
                nextra = 0
            for i in range(imax):
                seg = dat[:, i * self.seg_len:(i + 1) * self.seg_len]
                accum[:, i:i + 2] = accum[:, i:i + 2] + np.dot(seg, wts.T)
            if have_extra:
                # The final, shorter segment gets its own weight ramp.
                ramp = np.arange(nextra) / nextra
                wts = np.vstack([1 - ramp, ramp])
                accum[:, -2:] = accum[:, -2:] + np.dot(dat[:, self.seg_len * nseg:], wts.T)
        else:
            nseg = dat.shape[1] // self.seg_len
            if nseg * self.seg_len < dat.shape[1]:
                nseg = nseg + 1
            scratch = np.zeros(nseg * self.seg_len)
            ndet, ndat = dat.shape
            for det in range(ndet):
                scratch[:ndat] = dat[det, :]
                accum[det, :] = np.sum(np.reshape(scratch, [nseg, self.seg_len]), axis=1)
        if do_add:
            self.params[:] = self.params[:] + accum
        else:
            self.params[:] = accum

    def map2tod(self, tod, dat=None, do_add=True, do_omp=False):
        """Evaluate the stripe model into a TOD-shaped array."""
        model = tod.get_empty()
        ndet, ndat = model.shape
        if self.do_slope:
            ramp = np.arange(self.seg_len) / self.seg_len
            wts = np.vstack([1 - ramp, ramp])
            nseg = model.shape[1] // self.seg_len
            imax = nseg
            if imax * self.seg_len < model.shape[1]:
                have_extra = True
                nextra = model.shape[1] - imax * self.seg_len
            else:
                have_extra = False
                nseg = nseg - 1
            for i in range(imax):
                model[:, i * self.seg_len:(i + 1) * self.seg_len] = np.dot(self.params[:, i:i + 2], wts)
            if have_extra:
                ramp = np.arange(nextra) / nextra
                wts = np.vstack([1 - ramp, ramp])
                model[:, self.seg_len * nseg:] = np.dot(self.params[:, -2:], wts)
        else:
            for det in range(ndet):
                stretched = np.repeat(self.params[det, :], self.seg_len)
                model[det, :] = stretched[:ndat]
        if dat is None:
            return model
        if do_add:
            dat[:] = dat[:] + model
        else:
            dat[:] = model
class __tsAirmass_old:
    """Deprecated per-TOD airmass/opacity model: a Legendre polynomial in
    time, scaled per detector by the airmass profile derived from elevation."""

    def __init__(self, tod=None, order=5):
        if tod is None:
            # Empty template (as produced by copy()).
            self.sz = np.asarray([0, 0], dtype='int')
            self.params = np.zeros(1)
            self.fname = ''
            self.order = 0
            self.airmass = None
        else:
            self.sz = tod.get_data_dims()
            self.fname = tod.info['fname']
            self.order = order
            self.params = np.zeros(order + 1)
            self.airmass = scaled_airmass_from_el(tod.info['elev'])

    def copy(self, copyMat=False):
        cp = tsAirmass()
        cp.sz = self.sz
        cp.params = self.params.copy()
        cp.fname = self.fname
        cp.order = self.order
        if copyMat:
            cp.airmass = self.airmass.copy()
        else:
            # Since the airmass shouldn't change, share the reference to
            # avoid blowing up RAM.
            cp.airmass = self.airmass
        return cp

    def clear(self):
        self.params[:] = 0.0

    def dot(self, ts):
        return np.sum(self.params * ts.params)

    def axpy(self, ts, a):
        self.params = self.params + a * ts.params

    def _get_current_legmat(self):
        # Legendre Vandermonde matrix over the TOD's time samples.
        x = np.linspace(-1, 1, self.sz[1])
        m1 = np.polynomial.legendre.legvander(x, self.order)
        return m1

    def _get_current_model(self):
        m1 = self._get_current_legmat()
        poly = np.dot(m1, self.params)
        mat = np.repeat([poly], self.sz[0], axis=0)
        mat = mat * self.airmass
        return mat

    def tod2map(self, tod, dat, do_add=True, do_omp=False):
        poly = self._get_current_legmat()
        vec = np.sum(dat * self.airmass, axis=0)
        atd = np.dot(vec, poly)
        if do_add:
            self.params[:] = self.params[:] + atd
        else:
            self.params[:] = atd

    def map2tod(self, tod, dat, do_add=True, do_omp=False):
        mat = self._get_current_model()
        if do_add:
            dat[:] = dat[:] + mat
        else:
            dat[:] = mat

    # BUG FIX: __mul__ was defined twice; the first definition had no return
    # statement (it always yielded None) and was shadowed by the second
    # anyway, so only the correct version is kept.
    def __mul__(self, to_mul):
        tt = self.copy()
        tt.params = self.params * to_mul.params
        return tt

    def write(self, fname=None):
        # Nothing is persisted for this model.
        pass
def _fit_timestreams_with_derivs_old(func,pars,tods,to_fit=None,to_scale=None,tol=1e-2,maxiter=10,scale_facs=None):
    '''Fit a model to timestreams via noise-weighted Newton iterations.

    func(pp,tod) should return (derivs, pred): the model prediction for that
    TOD at parameter values pp and its derivatives w.r.t. each parameter.
    to_fit says which parameters to float: 0 to fix, 1 to float, and any value
    larger than 1 ties the matching parameters together (e.g. when shifting a
    TOD pointing model you could put 2 for all RA offsets and 3 for all dec
    offsets); the groups are encoded in a rotation matrix applied to derivs.
    scale_facs optionally damps the Newton step on the first few iterations.
    Returns (pp, chisq) - the fitted parameters and final chi-squared.
    NOTE(review): to_scale is accepted but currently unused here.'''
    if not(to_fit is None):
        #print 'working on creating rotmat'
        # build rotmat mapping the reduced (floated/grouped) parameter space
        # back to the full parameter vector
        to_fit=np.asarray(to_fit,dtype='int64')
        inds=np.unique(to_fit)
        nfloat=np.sum(to_fit==1)
        ncovary=np.sum(inds>1)
        nfit=nfloat+ncovary
        rotmat=np.zeros([len(pars),nfit])
        solo_inds=np.where(to_fit==1)[0]
        icur=0
        for ind in solo_inds:
            rotmat[ind,icur]=1.0
            icur=icur+1
        if ncovary>0:
            group_inds=inds[inds>1]
            for ind in group_inds:
                ii=np.where(to_fit==ind)[0]
                rotmat[ii,icur]=1.0
                icur=icur+1
    iter=0
    converged=False
    pp=pars.copy()
    while (converged==False) and (iter<maxiter):
        # accumulate gradient, curvature, and chi-squared over all TODs
        curve=0.0
        grad=0.0
        chisq=0.0
        for tod in tods.tods:
            #sz=tod.info['dat_calib'].shape
            sz=tod.get_data_dims()
            derivs,pred=func(pp,tod)
            if not (to_fit is None):
                derivs=np.dot(rotmat.transpose(),derivs)
            derivs_filt=0*derivs
            tmp=np.zeros(sz)
            npp=derivs.shape[0]
            nn=derivs.shape[1]
            #delt=tod.info['dat_calib']-pred
            delt=tod.get_data()-pred
            delt_filt=tod.apply_noise(delt)
            # noise-filter each derivative timestream (rows are flattened TODs)
            for i in range(npp):
                tmp[:,:]=np.reshape(derivs[i,:],sz)
                tmp_filt=tod.apply_noise(tmp)
                derivs_filt[i,:]=np.reshape(tmp_filt,nn)
            delt=np.reshape(delt,nn)
            delt_filt=np.reshape(delt_filt,nn)
            # symmetrize the gradient between filtered-derivs and filtered-residual
            grad1=np.dot(derivs,delt_filt)
            grad2=np.dot(derivs_filt,delt)
            grad=grad+0.5*(grad1+grad2)
            curve=curve+np.dot(derivs,derivs_filt.transpose())
            chisq=chisq+np.dot(delt,delt_filt)
        if iter==0:
            chi_ref=chisq
        curve=0.5*(curve+curve.transpose())
        curve=curve+2.0*np.diag(np.diag(curve)) #double the diagonal for testing purposes
        curve_inv=np.linalg.inv(curve)
        errs=np.sqrt(np.diag(curve_inv))
        shifts=np.dot(curve_inv,grad)
        #print errs,shifts
        # converged when the largest shift is small compared to its error bar
        conv_fac=np.max(np.abs(shifts/errs))
        if conv_fac<tol:
            print('We have converged.')
            converged=True
        if not (to_fit is None):
            shifts=np.dot(rotmat,shifts)
        if not(scale_facs is None):
            if iter<len(scale_facs):
                print('rescaling shift by ',scale_facs[iter])
                shifts=shifts*scale_facs[iter]
        # unit conversions for printing (radians->arcsec etc. - confirm ordering against func)
        to_print=np.asarray([3600*180.0/np.pi,3600*180.0/np.pi,3600*180.0/np.pi,1.0,1.0,3600*180.0/np.pi,3600*180.0/np.pi,3600*180.0/np.pi*np.sqrt(8*np.log(2)),1.0])*(pp-pars)
        print('iter ',iter,' max shift is ',conv_fac,' with chisq improvement ',chi_ref-chisq,to_print) #converged,pp,shifts
        pp=pp+shifts
        iter=iter+1
    return pp,chisq
#class Cuts:
# def __init__(self,tod):
# self.tag=tod.info['tag']
# self.ndet=tod.info['dat_calib'].shape[0]
# self.cuts=[None]*self.ndet
#
#class CutsVec:
# def __init__(self,todvec):
# self.ntod=todvec.ntod
# self.cuts=[None]*self.ntod
# for tod in todvec.tods:
# self.cuts[tod.info['tag']]=Cuts(tod)
#this class is pointless, as you can get the same functionality with the tsModel class, which will be
#consistent with other timestream model classes.
#class CutsVecs:
# def __init__(self,todvec,do_add=True):
# #if class(todvec)==CutsVecs: #for use in copy
# if isinstance(todvec,CutsVecs):
# self.cuts=[None]*todvec.ntod
# self.ntod=todvec.ntod
# for i in range(todvec.ntod):
# self.cuts[i]=todvec.cuts[i].copy()
# return
# #if class(todvec)!=TodVec:
# if not(isinstance(todvec,TodVec)):
# print('error in CutsVecs init, must pass in a todvec class.')
# return None
# self.cuts=[None]*todvec.ntod
# self.ntod=todvec.ntod
# for i in range(todvec.ntod):
# tod=todvec.tods[i]
# if tod.info['tag']!=i:
# print('warning, tag mismatch in CutsVecs.__init__')
# print('continuing, but you should be careful...')
# if 'bad_samples' in tod.info:
# self.cuts[i]=Cuts(tod,do_add)
# elif 'mask' in tod.info:
# self.cuts[i]=CutsCompact(tod)
# self.cuts[i].cuts_from_array(tod.info['mask'])
# self.cuts[i].get_imap()
# def copy(self):
# return CutsVecs(self)
# def clear(self):
# for cuts in self.cuts:
# cuts.clear()
# def axpy(self,cutsvec,a):
# assert(self.ntod==cutsvec.ntod)
# for i in range(ntod):
# self.cuts[i].axpy(cutsvec.cuts[i],a)
# def map2tod(self,todvec):
# assert(self.ntod==todvec.ntod)
# for i in range(self.ntod):
# self.cuts[i].map2tod(todvec.tods[i])
# def tod2map(self,todvec,dat):
# assert(self.ntod==todvec.ntod)
# assert(self.ntod==dat.ntod)
# for i in range(self.ntod):
# self.cuts[i].tod2map(todvec.tods[i],dat.tods[i])
# def dot(self,cutsvec):
# tot=0.0
# assert(self.ntod==cutsvec.ntod)
# for i in range(self.ntod):
# tot+=self.cuts[i].dot(cutsvec.cuts[i])
# return tot
| 14,023 | 31.843091 | 175 | py |
minkasi | minkasi-master/minkasi/minkasi_nb.py | import numpy as np
import numba as nb
@nb.njit(parallel=True)
def map2tod_destriped(mat,pars,lims,do_add=True):
    """Project destriper offsets into TOD space.
    For each segment s spanning samples [lims[s],lims[s+1]), the per-detector
    offset pars[det,s] is added to (do_add=True) or written into mat[det,:].
    Parallelized over segments with numba prange."""
    ndet=mat.shape[0]
    nseg=len(lims)-1
    for seg in nb.prange(nseg):
        for det in range(ndet):
            if do_add:
                for i in range(lims[seg],lims[seg+1]):
                    mat[det,i]=mat[det,i]+pars[det,seg]
            else:
                for i in range(lims[seg],lims[seg+1]):
                    mat[det,i]=pars[det,seg]
@nb.njit(parallel=True)
def tod2map_destriped(mat,pars,lims,do_add=True):
    """Transpose of map2tod_destriped: sum TOD samples of each segment
    [lims[s],lims[s+1]) into pars[det,s].  With do_add=False each parameter
    is zeroed before accumulation.  Parallelized over segments with prange."""
    ndet=mat.shape[0]
    nseg=len(lims)-1
    for seg in nb.prange(nseg):
        for det in range(ndet):
            if do_add==False:
                pars[det,seg]=0
            for i in range(lims[seg],lims[seg+1]):
                pars[det,seg]=pars[det,seg]+mat[det,i]
@nb.njit(parallel=True)
def __map2tod_binned_det_loop(pars,inds,mat,ndet,n):
    """Inner numba loop for map2tod_binned_det: mat[det,i] += pars[det,inds[i]],
    parallelized over detectors."""
    for det in nb.prange(ndet):
        for i in range(n):
            mat[det][i]=mat[det][i]+pars[det][inds[i]]
            #pars[det][inds[i]]=pars[det][inds[i]]+mat[det][i]
def map2tod_binned_det(mat,pars,vec,lims,nbin,do_add=True):
    """Project per-detector binned parameters back into the TOD.

    Each sample j of detector d receives pars[d,bin(j)], where bin(j) is
    found by linearly mapping vec[j] from [lims[0],lims[1]) onto nbin bins.
    With do_add=False the output mat is zeroed first; otherwise the model is
    added on top of the existing samples.
    """
    ndet=mat.shape[0]
    nsamp=mat.shape[1]
    scale=nbin/(lims[1]-lims[0])
    bin_inds=np.asarray((vec-lims[0])*scale,dtype='int64')
    if not do_add:
        mat[:]=0
    __map2tod_binned_det_loop(pars,bin_inds,mat,ndet,nsamp)
@nb.njit(parallel=True)
def __tod2map_binned_det_loop(pars,inds,mat,ndet,n):
    """Inner numba loop for tod2map_binned_det: pars[det,inds[i]] += mat[det,i],
    parallelized over detectors."""
    for det in nb.prange(ndet):
        for i in range(n):
            pars[det][inds[i]]=pars[det][inds[i]]+mat[det][i]
def tod2map_binned_det(mat,pars,vec,lims,nbin,do_add=True):
    """Accumulate TOD samples into per-detector bins (transpose of
    map2tod_binned_det): pars[d,bin(j)] += mat[d,j], with bin(j) found by
    linearly mapping vec[j] from [lims[0],lims[1]) onto nbin bins.
    NOTE: with do_add=False it is mat (the TOD), not pars, that is zeroed
    first - preserved from the original; confirm intent.  Returns 0.
    """
    ndet=mat.shape[0]
    nsamp=mat.shape[1]
    scale=nbin/(lims[1]-lims[0])
    bin_inds=np.asarray((vec-lims[0])*scale,dtype='int64')
    if not do_add:
        mat[:]=0
    __tod2map_binned_det_loop(pars,bin_inds,mat,ndet,nsamp)
    return 0
#@nb.njit(parallel=True)
#def map2tod_binned_det(mat,pars,vec,lims,nbin,do_add=True):
# n=mat.shape[1]
# inds=np.empty(n,dtype='int')
# fac=nbin/(lims[1]-lims[0])
# for i in nb.prange(n):
# inds[i]=(vec[i]-lims[0])*fac
# ndet=mat.shape[0]
# if do_add==False:
# mat[:]=0
# for det in np.arange(ndet):
# for i in nb.prange(n):
# mat[det][i]=mat[det][i]+pars[det][inds[i]]
@nb.njit(parallel=True)
def fill_elliptical_isobeta(params,dx,dy,pred):
    """Evaluate an elliptical isothermal-beta model into pred in place.
    params=[ra,dec,theta axis 1,theta axis 2,angle,beta,amplitude]; dx,dy are
    (ndet,n) pointings in radians.  The x offset is scaled by cos(dec) for a
    locally flat sky.  Parallelized over detectors with numba prange."""
    ndet=dx.shape[0]
    n=dx.shape[1]
    x0=params[0]
    y0=params[1]
    theta1=params[2]
    theta2=params[3]
    theta1_inv=1/theta1
    theta2_inv=1/theta2
    theta1_inv_sqr=theta1_inv**2
    theta2_inv_sqr=theta2_inv**2
    psi=params[4]
    beta=params[5]
    amp=params[6]
    cosdec=np.cos(y0)
    cospsi=np.cos(psi)
    sinpsi=np.sin(psi)
    mypow=0.5-1.5*beta
    for det in nb.prange(ndet):
        for j in np.arange(n):
            # offsets from the model center, rotated into the ellipse frame
            delx=(dx[det,j]-x0)*cosdec
            dely=dy[det,j]-y0
            xx=delx*cospsi+dely*sinpsi
            yy=dely*cospsi-delx*sinpsi
            rr=1+theta1_inv_sqr*xx*xx+theta2_inv_sqr*yy*yy
            pred[det,j]=amp*(rr**mypow)
@nb.njit(parallel=True)
def fill_elliptical_isobeta_derivs(params,dx,dy,pred,derivs):
    """Fill model and analytic derivatives for an elliptical isothermal beta model.
    params=[ra,dec,theta axis 1,theta axis 2,angle,beta,amplitude]; beta should
    be positive (i.e. 0.7, not -0.7).  pred[det,j] receives the model and
    derivs[k,det,j] receives d(model)/d(params[k]).  Parallelized over detectors."""
    ndet=dx.shape[0]
    n=dx.shape[1]
    x0=params[0]
    y0=params[1]
    theta1=params[2]
    theta2=params[3]
    theta1_inv=1/theta1
    theta2_inv=1/theta2
    theta1_inv_sqr=theta1_inv**2
    theta2_inv_sqr=theta2_inv**2
    psi=params[4]
    beta=params[5]
    amp=params[6]
    cosdec=np.cos(y0)
    sindec=np.sin(y0)/np.cos(y0)  # note: this is tan(dec); enters the d/d(dec) terms
    #cosdec=np.cos(dy[0,0])
    #cosdec=1.0
    cospsi=np.cos(psi)
    cc=cospsi**2
    sinpsi=np.sin(psi)
    ss=sinpsi**2
    cs=cospsi*sinpsi
    mypow=0.5-1.5*beta
    for det in nb.prange(ndet):
        for j in np.arange(n):
            # offsets from center, rotated into the ellipse frame
            delx=(dx[det,j]-x0)*cosdec
            dely=dy[det,j]-y0
            xx=delx*cospsi+dely*sinpsi
            yy=dely*cospsi-delx*sinpsi
            xfac=theta1_inv_sqr*xx*xx
            yfac=theta2_inv_sqr*yy*yy
            #rr=1+theta1_inv_sqr*xx*xx+theta2_inv_sqr*yy*yy
            rr=1+xfac+yfac
            rrpow=rr**mypow
            pred[det,j]=amp*rrpow
            # chain rule: df/dparam = (df/drr) * (drr/dparam)
            dfdrr=rrpow/rr*mypow
            drdx=-2*delx*(cc*theta1_inv_sqr+ss*theta2_inv_sqr)-2*dely*(theta1_inv_sqr-theta2_inv_sqr)*cs
            #drdy=-2*dely*(cc*theta2_inv_sqr+ss*theta1_inv_sqr)-2*delx*(theta1_inv_sqr-theta2_inv_sqr)*cs
            drdy=-(2*xx*theta1_inv_sqr*(cospsi*sindec*delx+sinpsi)+2*yy*theta2_inv_sqr*(-sinpsi*sindec*delx+cospsi))
            drdtheta=2*(theta1_inv_sqr-theta2_inv_sqr)*(cs*(dely**2-delx**2)+delx*dely*(cc-ss))
            #drdtheta=-2*delx**2*cs*(theta_1_inv_sqr-theta_2_inv_sqr)+2*dely*delx*(theta_1_inv_sqr-theta_2_inv_sqr)*(cc-ss)+2*dely**2*cs*(
            derivs[0,det,j]=dfdrr*drdx*cosdec
            derivs[1,det,j]=dfdrr*drdy
            derivs[2,det,j]=dfdrr*xfac*(-2*theta1_inv)
            derivs[3,det,j]=dfdrr*yfac*(-2*theta2_inv)
            derivs[4,det,j]=dfdrr*drdtheta
            derivs[5,det,j]=-1.5*np.log(rr)*amp*rrpow
            derivs[6,det,j]=rrpow
@nb.njit(parallel=True)
def fill_elliptical_gauss_derivs(params,dx,dy,pred,derivs):
    """Fill model and analytic derivatives for an elliptical Gaussian model.
    params=[ra,dec,sigma axis 1,sigma axis 2,angle,amplitude]; dx,dy are
    (ndet,n) pointings in radians.  pred[det,j] receives the model and
    derivs[k,det,j] receives d(model)/d(params[k]).  Parallelized over detectors."""
    ndet=dx.shape[0]
    n=dx.shape[1]
    x0=params[0]
    y0=params[1]
    theta1=params[2]
    theta2=params[3]
    theta1_inv=1/theta1
    theta2_inv=1/theta2
    theta1_inv_sqr=theta1_inv**2
    theta2_inv_sqr=theta2_inv**2
    psi=params[4]
    amp=params[5]
    cosdec=np.cos(y0)
    sindec=np.sin(y0)/np.cos(y0)  # note: this is tan(dec); enters the d/d(dec) terms
    cospsi=np.cos(psi)
    cc=cospsi**2
    sinpsi=np.sin(psi)
    ss=sinpsi**2
    cs=cospsi*sinpsi
    for det in nb.prange(ndet):
        for j in np.arange(n):
            # offsets from center, rotated into the ellipse frame
            delx=(dx[det,j]-x0)*cosdec
            dely=dy[det,j]-y0
            xx=delx*cospsi+dely*sinpsi
            yy=dely*cospsi-delx*sinpsi
            xfac=theta1_inv_sqr*xx*xx
            yfac=theta2_inv_sqr*yy*yy
            #rr=1+theta1_inv_sqr*xx*xx+theta2_inv_sqr*yy*yy
            rr=xfac+yfac
            rrpow=np.exp(-0.5*rr)
            pred[det,j]=amp*rrpow
            # chain rule: df/dparam = (df/drr) * (drr/dparam)
            dfdrr=-0.5*rrpow
            drdx=-2*delx*(cc*theta1_inv_sqr+ss*theta2_inv_sqr)-2*dely*(theta1_inv_sqr-theta2_inv_sqr)*cs
            #drdy=-2*dely*(cc*theta2_inv_sqr+ss*theta1_inv_sqr)-2*delx*(theta1_inv_sqr-theta2_inv_sqr)*cs
            drdy=-(2*xx*theta1_inv_sqr*(cospsi*sindec*delx+sinpsi)+2*yy*theta2_inv_sqr*(-sinpsi*sindec*delx+cospsi))
            drdtheta=2*(theta1_inv_sqr-theta2_inv_sqr)*(cs*(dely**2-delx**2)+delx*dely*(cc-ss))
            #drdtheta=-2*delx**2*cs*(theta_1_inv_sqr-theta_2_inv_sqr)+2*dely*delx*(theta_1_inv_sqr-theta_2_inv_sqr)*(cc-ss)+2*dely**2*cs*(
            derivs[0,det,j]=dfdrr*drdx*cosdec
            #derivs[1,det,j]=dfdrr*(drdy-2*sindec*delx**2*theta1_inv_sqr)
            derivs[1,det,j]=dfdrr*drdy
            derivs[2,det,j]=dfdrr*xfac*(-2*theta1_inv)
            derivs[3,det,j]=dfdrr*yfac*(-2*theta2_inv)
            derivs[4,det,j]=dfdrr*drdtheta
            derivs[5,det,j]=rrpow
@nb.njit(parallel=True)
def radec2pix_car(ra,dec,ipix,lims,pixsize,cosdec,ny):
    """Convert RA/Dec to flattened pixel indices for a CAR (plate-carree) map,
    writing ipix = xpix*ny + ypix in place.  lims[0]/lims[2] are the RA/Dec of
    pixel (0,0), pixsize the pixel size (same angular units as ra/dec), and
    cosdec the flat-sky RA compression factor.  Parallelized with prange."""
    ra=np.ravel(ra)
    dec=np.ravel(dec)
    ipix=np.ravel(ipix)
    n=len(ipix)
    for i in nb.prange(n):
        # +0.5 rounds to the nearest pixel center
        xpix=int((ra[i]-lims[0])*cosdec/pixsize+0.5)
        ypix=int((dec[i]-lims[2])/pixsize+0.5)
        ipix[i]=xpix*ny+ypix
@nb.njit(parallel=True)
def axpy_in_place(y,x,a=1.0):
    """In-place y += a*x for matching 2-D arrays, parallelized over rows."""
    #add a*x into y in place
    n=x.shape[0]
    m=x.shape[1]
    assert(n==y.shape[0])
    assert(m==y.shape[1])
    #Numba has a bug, as of at least 0.53.1 (an 0.52.0) where
    #both parts of a conditional can get executed, so don't
    #try to be fancy. Lower-down code can be used in the future.
    for i in nb.prange(n):
        for j in np.arange(m):
            y[i,j]=y[i,j]+x[i,j]*a
    #isone=(a==1.0)
    #if isone:
    #    for i in nb.prange(n):
    #        for j in np.arange(m):
    #            y[i,j]=y[i,j]+x[i,j]
    #else:
    #    for i in nb.prange(n):
    #        for j in np.arange(m):
    #            y[i,j]=y[i,j]+x[i,j]*a
@nb.njit(parallel=True)
def scale_matrix_by_vector(mat,vec,axis=1):
    """Scale mat in place by vec: axis=1 (default) scales row i by vec[i]
    (vec must have length mat.shape[0]); axis=0 scales column j by vec[j].
    NOTE: any other axis just prints a message and leaves mat unchanged."""
    n=mat.shape[0]
    m=mat.shape[1]
    if axis==1:
        assert(len(vec)==n)
        for i in nb.prange(n):
            for j in np.arange(m):
                mat[i,j]=mat[i,j]*vec[i]
    elif axis==0:
        assert(len(vec)==m)
        for i in nb.prange(n):
            for j in np.arange(m):
                mat[i,j]=mat[i,j]*vec[j]
    else:
        print('unsupported number of dimensions in scale_matrix_by_vector')
minkasi | minkasi-master/minkasi/minkasi.py | import os
import numpy as np
import ctypes
import time
from . import mkfftw
#import pyfits
from astropy.io import fits as pyfits
import astropy
from astropy import wcs
from astropy.io import fits
from astropy.cosmology import WMAP9 as cosmo #choose your cosmology here
import scipy
import copy
import sys
from numba import jit
try:
import healpy
have_healpy=True
except:
have_healpy=False
try:
import numba as nb
from . import minkasi_nb
have_numba=True
except:
have_numba=False
try:
import qpoint as qp
have_qp=True
except:
have_qp=False
print('importing mpi4py')
try:
import mpi4py.rc
mpi4py.rc.threads = False
from mpi4py import MPI
print('mpi4py imported')
comm=MPI.COMM_WORLD
myrank = comm.Get_rank()
nproc=comm.Get_size()
print('nproc:, ', nproc)
if nproc>1:
have_mpi=True
else:
have_mpi=False
except:
have_mpi=False
myrank=0
nproc=1
#try:
# import numba as nb
# have_numba=True
#else:
# have_numba=False
# --- C backend bindings -----------------------------------------------------
# Load the compiled libminkasi kernels: try the library search path first,
# then fall back to a copy that lives next to this module.
try:
    mylib=ctypes.cdll.LoadLibrary("libminkasi.so")
except OSError:
    mylib=ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(os.path.abspath(__file__)), "libminkasi.so"))
# TOD->map projection kernels (unpolarized).
tod2map_simple_c=mylib.tod2map_simple
tod2map_simple_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_void_p]
tod2map_atomic_c=mylib.tod2map_atomic
tod2map_atomic_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_void_p]
#tod2map_everyone_c=mylib.tod2map_everyone
#tod2map_everyone_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_void_p,ctypes.c_int,ctypes.c_void_p,ctypes.c_int]
tod2map_omp_c=mylib.tod2map_omp
tod2map_omp_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_void_p,ctypes.c_int]
tod2map_cached_c=mylib.tod2map_cached
tod2map_cached_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_void_p,ctypes.c_int]
# map->TOD projection kernels (unpolarized).
map2tod_simple_c=mylib.map2tod_simple
map2tod_simple_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_void_p,ctypes.c_int]
map2tod_omp_c=mylib.map2tod_omp
map2tod_omp_c.argtypes=[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,ctypes.c_int]
# Polarized map->TOD kernels.
map2tod_iqu_omp_c=mylib.map2tod_iqu_omp
map2tod_iqu_omp_c.argtypes=[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,ctypes.c_int,ctypes.c_void_p,ctypes.c_int]
map2tod_qu_omp_c=mylib.map2tod_qu_omp
map2tod_qu_omp_c.argtypes=[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,ctypes.c_int,ctypes.c_void_p,ctypes.c_int]
# Polarized TOD->map kernels, including preconditioner variants.
tod2map_iqu_simple_c=mylib.tod2map_iqu_simple
tod2map_iqu_simple_c.argtypes=[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,ctypes.c_int,ctypes.c_void_p]
tod2map_qu_simple_c=mylib.tod2map_qu_simple
tod2map_qu_simple_c.argtypes=[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,ctypes.c_int,ctypes.c_void_p]
tod2map_iqu_precon_simple_c=mylib.tod2map_iqu_precon_simple
tod2map_iqu_precon_simple_c.argtypes=[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,ctypes.c_int,ctypes.c_void_p]
tod2map_qu_precon_simple_c=mylib.tod2map_qu_precon_simple
tod2map_qu_precon_simple_c.argtypes=[ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,ctypes.c_int,ctypes.c_void_p]
# Misc kernels: map scanning, cuts handling, thread control, model fillers.
scan_map_c=mylib.scan_map
scan_map_c.argtypes=[ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int]
tod2cuts_c=mylib.tod2cuts
tod2cuts_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int]
cuts2tod_c=mylib.cuts2tod
cuts2tod_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int]
set_nthread_c=mylib.set_nthread
set_nthread_c.argtypes=[ctypes.c_int]
get_nthread_c=mylib.get_nthread
get_nthread_c.argtypes=[ctypes.c_void_p]
fill_isobeta_c=mylib.fill_isobeta
fill_isobeta_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int]
fill_isobeta_derivs_c=mylib.fill_isobeta_derivs
fill_isobeta_derivs_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int]
fill_gauss_derivs_c=mylib.fill_gauss_derivs
fill_gauss_derivs_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int]
fill_gauss_src_c=mylib.fill_gauss_src
fill_gauss_src_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int]
outer_c=mylib.outer_block
outer_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int]
def y2rj(freq=90):
    """Factor to multiply a Compton-y map by to get a Rayleigh-Jeans
    normalized map at frequency freq (GHz).  The CMB temperature T_cmb is
    deliberately not applied, so the low-frequency limit of this factor is -2."""
    boltz=1.38064852e-16   # Boltzmann constant [erg/K]
    planck=6.62607004e-27  # Planck constant [erg s]
    t_cmb=2.725            # CMB temperature [K]
    x=freq*1e9*planck/boltz/t_cmb  # dimensionless h*nu/(k_B*T)
    expx=np.exp(x)
    rj_fac=x**2*expx/(expx-1)**2
    spec=x*(expx+1)/(expx-1)-4
    return rj_fac*spec
def planck_g(freq=90):
    """Conversion between T_CMB and T_RJ as a function of frequency (GHz);
    approaches 1 at low frequency and falls toward 0 at high frequency."""
    boltz=1.38064852e-16   # Boltzmann constant [erg/K]
    planck=6.62607004e-27  # Planck constant [erg s]
    t_cmb=2.725            # CMB temperature [K]
    x=freq*1e9*planck/boltz/t_cmb  # dimensionless h*nu/(k_B*T)
    expx=np.exp(x)
    return x**2*expx/( (expx-1)**2)
def report_mpi():
    """Print this process's MPI rank and the total process count, or note
    that MPI is unavailable."""
    if not have_mpi:
        print('mpi not found')
    else:
        print('myrank is ',myrank,' out of ',nproc)
def barrier():
    """Block until all MPI ranks arrive; a no-op when running without MPI."""
    if not have_mpi:
        return
    comm.barrier()
def invsafe(mat,thresh=1e-14):
    """Pseudo-inverse of mat via SVD: singular values below thresh times the
    largest are treated as exactly zero (their inverses dropped), making the
    inversion safe for singular or ill-conditioned matrices."""
    u,s,v=np.linalg.svd(mat,0)
    bad=np.abs(s)<thresh*s.max()
    s_inv=1/s
    s_inv[bad]=0
    # v^T @ diag(s_inv) @ u^T, with the diagonal applied by broadcasting
    return np.dot(v.transpose()*s_inv,u.transpose())
def tod2map_simple(map,dat,ipix):
    """Accumulate TOD samples into map in place via the single-threaded C
    kernel.  dat is (ndet,ndata); ipix gives each sample's map pixel index
    and must be int32 (the C code assumes 32-bit indices)."""
    ndet=dat.shape[0]
    ndata=dat.shape[1]
    if not(ipix.dtype=='int32'):
        print("Warning - ipix is not int32 in tod2map_simple. this is likely to produce garbage results.")
    tod2map_simple_c(map.ctypes.data,dat.ctypes.data,ndet,ndata,ipix.ctypes.data)
def tod2map_everyone(map,dat,ipix,edges):
    """Accumulate TOD into map with per-thread pixel ranges given by edges
    (length nthread+1).
    NOTE(review): the binding tod2map_everyone_c is commented out in the
    ctypes section above, so calling this currently raises NameError."""
    assert(len(edges)==get_nthread()+1)
    tod2map_everyone_c(map.ctypes.data,dat.ctypes.data,dat.shape[0],dat.shape[1],ipix.ctypes.data,map.size,edges.ctypes.data,len(edges))
def tod2map_omp(map,dat,ipix,atomic=False):
    """Accumulate TOD samples into map in place using the OpenMP C kernels;
    atomic=True selects the atomic-update kernel, otherwise the default OMP
    kernel is used.  ipix must be int32."""
    ndet=dat.shape[0]
    ndata=dat.shape[1]
    if not(ipix.dtype=='int32'):
        print("Warning - ipix is not int32 in tod2map_omp. this is likely to produce garbage results.")
    if atomic:
        tod2map_atomic_c(map.ctypes.data,dat.ctypes.data,ndet,ndata,ipix.ctypes.data,map.size)
    else:
        tod2map_omp_c(map.ctypes.data,dat.ctypes.data,ndet,ndata,ipix.ctypes.data,map.size)
def tod2map_cached(map,dat,ipix):
    """Accumulate TOD samples into map via the cached C kernel.  map.shape[1]
    is passed as the per-copy length, so map presumably holds one map copy
    per thread - confirm against the C kernel.  ipix must be int32."""
    ndet=dat.shape[0]
    ndata=dat.shape[1]
    if not(ipix.dtype=='int32'):
        print("Warning - ipix is not int32 in tod2map_cached. this is likely to produce garbage results.")
    tod2map_cached_c(map.ctypes.data,dat.ctypes.data,ndet,ndata,ipix.ctypes.data,map.shape[1])
def tod2polmap(map,dat,poltag,twogamma,ipix):
    """Accumulate polarized TOD samples into map via the C kernels.  poltag
    selects the pointing-matrix flavor ('QU', 'IQU', 'QU_PRECON',
    'IQU_PRECON'); twogamma carries the per-sample detector angle terms.
    NOTE(review): an unrecognized poltag only prints a message and then
    still calls fun (which is None), raising TypeError."""
    ndet=dat.shape[0]
    ndata=dat.shape[1]
    fun=None
    if poltag=='QU':
        fun=tod2map_qu_simple_c
    if poltag=='IQU':
        fun=tod2map_iqu_simple_c
    if poltag=='QU_PRECON':
        fun=tod2map_qu_precon_simple_c
    if poltag=='IQU_PRECON':
        fun=tod2map_iqu_precon_simple_c
    if fun is None:
        print('unrecognized poltag ' + repr(poltag) + ' in tod2polmap.')
    #print('calling ' + repr(fun))
    fun(map.ctypes.data,dat.ctypes.data,twogamma.ctypes.data,ndet,ndata,ipix.ctypes.data)
def map2tod(dat,map,ipix,do_add=False,do_omp=True):
    """Project map values into TOD samples via the C kernels, using ipix as
    the per-sample pixel index; do_add selects accumulate vs overwrite and
    do_omp chooses the OpenMP kernel over the single-threaded one."""
    ndet=dat.shape[0]
    ndata=dat.shape[1]
    if do_omp:
        map2tod_omp_c(dat.ctypes.data, map.ctypes.data, ndet, ndata, ipix.ctypes.data, do_add)
    else:
        map2tod_simple_c(dat.ctypes.data,map.ctypes.data,ndet,ndata,ipix.ctypes.data,do_add)
def polmap2tod(dat,map,poltag,twogamma,ipix,do_add=False,do_omp=True):
    """Project a polarized map into TOD samples via the C kernels; poltag
    selects the pointing-matrix flavor and twogamma carries the per-sample
    detector angle terms.  Unknown poltags print a message and return.
    NOTE(review): the QU_PRECON/IQU_PRECON branches reference
    map2tod_qu_precon_omp_c / map2tod_iqu_precon_omp_c, which are never
    bound from mylib above, so those tags raise NameError - bind them or
    drop the branches."""
    ndet=dat.shape[0]
    ndata=dat.shape[1]
    fun=None
    if poltag=='QU':
        fun=map2tod_qu_omp_c
    if poltag=='IQU':
        fun=map2tod_iqu_omp_c
    if poltag=='QU_PRECON':
        fun=map2tod_qu_precon_omp_c
    if poltag=='IQU_PRECON':
        fun=map2tod_iqu_precon_omp_c
    if fun is None:
        print('unknown poltag ' + repr(poltag) + ' in polmap2tod.')
        return
    #print('calling ' + repr(fun))
    fun(dat.ctypes.data,map.ctypes.data,twogamma.ctypes.data,ndet,ndata,ipix.ctypes.data,do_add)
@jit(nopython=True)
def map2todbowl(vecs, params):
    """
    Converts bowling parameters to TODs for the tsBowl class.

    Parameters
    ----------
    vecs: np.array(ndet, ndata, order)
        pseudo-Vandermonde matrix (per detector; note this matches the
        indexing below and the companion tod2mapbowl, not (order,ndata,ndet))
    params: np.array(ndet, order)
        corresponding weights for the pseudo-Vandermonde matrix

    Returns
    -------
    np.array(ndet, ndata)
        per-detector polynomial model evaluated along the TOD
    """
    #Return tod should have shape ndet x ndata
    to_return = np.zeros((vecs.shape[0], vecs.shape[-2]))
    for i in range(vecs.shape[0]):
        to_return[i] = np.dot(vecs[i,...], params[i,...])
    return to_return
@jit(nopython=True)
def tod2mapbowl(vecs, mat):
    """
    Transpose of map2todbowl for bowling.

    Parameters
    ----------
    vecs: np.array(ndet, ndata, order)
        pseudo-Vandermonde matrix
    mat: np.array(ndet, ndata)
        tod data

    Returns
    -------
    np.array(ndet, order)
        per-detector projections vecs[i].T @ mat[i]
    """
    #Result has shape ndet x order
    to_return = np.zeros((vecs.shape[0], vecs.shape[-1]))
    for i in range(vecs.shape[0]):
        to_return[i] = np.dot(vecs[i,...].T, mat[i,...])
    return to_return
def read_fits_map(fname,hdu=0,do_trans=True):
    """Read an image from HDU `hdu` of a FITS file, optionally transposing
    it (do_trans=True, the default) to this module's (x,y) map convention."""
    hdulist=fits.open(fname)
    img=hdulist[hdu].data.copy()
    hdulist.close()
    if do_trans:
        img=(img.T).copy()
    return img
def write_fits_map_wheader(map,fname,header,do_trans=True):
    """Write a map to a FITS file with the supplied header, transposing
    first (do_trans=True) to undo this module's (x,y) convention.
    Existing files are overwritten."""
    if do_trans:
        map=(map.T).copy()
    hdu=fits.PrimaryHDU(map,header=header)
    try:
        hdu.writeto(fname,overwrite=True)
    except TypeError:
        # very old astropy/pyfits spelled the keyword clobber= instead of
        # overwrite=; only retry on that signature mismatch so genuine write
        # errors (permissions, bad path) propagate instead of being masked
        hdu.writeto(fname,clobber=True)
def get_ft_vec(n):
    """Return FFT-ordered sample offsets of length n: 0,1,... for the first
    half, wrapping to negative values for indices above n/2."""
    idx=np.arange(n)
    return np.where(idx>n/2, idx-n, idx)
def set_nthread(nthread):
    """Set the number of threads used by the C (OpenMP) routines."""
    set_nthread_c(nthread)
def get_nthread():
    """Return the number of threads the C (OpenMP) routines will use."""
    # the C routine writes the count into a caller-supplied int32 buffer
    nthread=np.zeros([1,1],dtype='int32')
    get_nthread_c(nthread.ctypes.data)
    return nthread[0,0]
def segs_from_vec(vec,pad=True):
    """ segs_from_vec(vec,pad=True)
    Return (nseg, istart, istop) describing the runs marked False in vec,
    for use in e.g. generating cuts from a vector/array.  With pad=True the
    input is embedded in a True-padded copy so runs touching either edge are
    handled; with pad=False the vector is assumed already True-padded."""
    if pad:
        padded=np.ones(len(vec)+2,dtype='bool')
        padded[1:-1]=vec
    elif vec.dtype=='bool':
        padded=vec
    else:
        padded=np.asarray(vec,dtype='bool')
    if padded.min():
        # no False anywhere: nothing to cut
        return 0,[],[]
    # transitions True<->False come in start/stop pairs
    edges=np.where(np.diff(padded))[0]
    assert(len(edges)%2==0)
    nseg=len(edges)//2
    istart=[edges[2*k] for k in range(nseg)]
    istop=[edges[2*k+1] for k in range(nseg)]
    return nseg,istart,istop
def cut_blacklist(tod_names,blacklist):
    """Remove any TOD whose base filename appears in blacklist.

    Matching is on the final path component only, so the same file under
    different directories is still cut.  If anything was removed, the
    surviving full paths are returned sorted; otherwise tod_names is
    returned unchanged.
    """
    keep={}
    for nm in tod_names:
        keep[nm.split('/')[-1]]=nm
    ncut=0
    for nm in blacklist:
        tt=nm.split('/')[-1]
        if tt in keep:
            ncut=ncut+1
            del(keep[tt])
    if ncut>0:
        print('deleted ',ncut,' bad files.')
        # bug fix: dict.values() has no .sort() in Python 3; build a sorted list
        return sorted(keep.values())
    return tod_names
def find_spikes(dat,inner=1,outer=10,rad=0.25,thresh=8,pad=2):
    """Find spikes in a block of timestreams.

    The data are convolved (circularly, via FFT) with a difference-of-Gaussians
    bandpass - a narrow Gaussian of width `inner` samples minus a wide one of
    width `outer` - which isolates spike-like features.  For each detector the
    largest filtered sample is repeatedly recorded and zeroed for as long as
    it exceeds `thresh` times that detector's median absolute filtered value.

    Parameters
    ----------
    dat : ndarray (ndet, n)
        Block of timestreams.
    inner, outer : float
        Widths (samples) of the narrow/wide Gaussians in the bandpass.
    thresh : float
        Detection threshold in units of median absolute filtered amplitude.
    rad, pad : unused here.  NOTE(review): confirm whether these were meant
        to mask data around detections, as pad does in find_jumps.

    Returns
    -------
    jumps : list, length ndet
        Per-detector lists of flagged sample indices (None if no spikes).
    datfilt : ndarray (ndet, n)
        The filtered data with flagged samples zeroed.
    """
    n=dat.shape[1]
    ndet=dat.shape[0]
    x=np.arange(n)
    # wrap-around difference-of-Gaussians filter centered on sample 0
    filt1=np.exp(-0.5*x**2/inner**2)
    filt1=filt1+np.exp(-0.5*(x-n)**2/inner**2)
    filt1=filt1/filt1.sum()
    filt2=np.exp(-0.5*x**2/outer**2)
    filt2=filt2+np.exp(-0.5*(x-n)**2/outer**2)
    filt2=filt2/filt2.sum()
    filt=filt1-filt2
    filtft=np.fft.rfft(filt)
    datft=np.fft.rfft(dat,axis=1)
    datfilt=np.fft.irfft(filtft*datft,axis=1,n=n)
    jumps=[None]*ndet
    mystd=np.median(np.abs(datfilt),axis=1)
    for i in range(ndet):
        # greedily flag and zero the largest remaining filtered excursion
        while np.max(np.abs(datfilt[i,:]))>thresh*mystd[i]:
            ind=np.argmax(np.abs(datfilt[i,:]))
            if jumps[i] is None:
                jumps[i]=[ind]
            else:
                jumps[i].append(ind)
            datfilt[i,ind]=0
    # bug fix: dropped the unreachable "return mystd" that followed this return
    return jumps,datfilt
def make_rings_wSlope(edges,cent,vals,map,pixsize=2.0,fwhm=10.0,amps=None,aa=1.0,bb=1.0,rot=0.0):
    """Make beam-convolved elliptical annular templates with linear slopes.

    Like make_rings, but each ring i falls linearly from myvals[i] toward the
    next ring's value (slope 0 for the outermost ring), and the radii are
    measured on an ellipse with axes aa,bb rotated by rot about cent (which
    is converted through map.wcs).  fwhm may be a scalar (single Gaussian
    beam) or a sequence with matching amps (sum of Gaussians).
    Returns an array of shape (len(edges)-1, map.nx, map.ny)."""
    # FFT-ordered radius grid for building the (wrap-around) beam map
    xvec=np.arange(map.nx)
    yvec=np.arange(map.ny)
    xvec[map.nx//2:]=xvec[map.nx//2:]-map.nx
    yvec[map.ny//2:]=yvec[map.ny//2:]-map.ny
    xmat=np.repeat([xvec],map.ny,axis=0).transpose()
    ymat=np.repeat([yvec],map.nx,axis=0)
    rmat=np.sqrt(xmat**2+ymat**2)*pixsize
    # beam model: single Gaussian if fwhm is scalar, else a weighted sum
    if isinstance(fwhm,int)|isinstance(fwhm,float):
        sig=fwhm/np.sqrt(8*np.log(2.))
        src_map=np.exp(-0.5*rmat**2./sig**2)
        src_map=src_map/src_map.sum()
    else:
        sig=fwhm[0]/np.sqrt(8*np.log(2))
        src_map=np.exp(-0.5*rmat**2/sig**2)*amps[0]
        for i in range(1,len(fwhm)):
            sig=fwhm[i]/np.sqrt(8*np.log(2))
            src_map=src_map+np.exp(-0.5*rmat**2/sig**2)*amps[i]
        src_map=src_map/src_map.sum()
    beam_area=pixsize**2/src_map.max()
    beam_area=beam_area/3600**2/(360**2/np.pi)
    print('beam_area is ',beam_area*1e9,' nsr')
    nring=len(edges)-1
    rings=np.zeros([nring,map.nx,map.ny])
    mypix=map.wcs.wcs_world2pix(cent[0],cent[1],1)
    print('mypix is ',mypix)
    xvec=np.arange(map.nx)
    yvec=np.arange(map.ny)
    xmat=np.repeat([xvec],map.ny,axis=0).transpose()
    ymat=np.repeat([yvec],map.nx,axis=0)
    srcft=np.fft.fft2(src_map)
    xtr = (xmat-mypix[0])*np.cos(rot) + (ymat-mypix[1])*np.sin(rot) # Rotate and translate x coords
    ytr = (ymat-mypix[1])*np.cos(rot) - (xmat-mypix[0])*np.sin(rot) # Rotate and translate y coords
    rmat = np.sqrt( (xtr/aa)**2 + (ytr/bb)**2 ) * pixsize # Elliptically scale x,y
    myvals = vals[:nring]*1.0 # Get just the values that correspond to rings
    myvals -= np.max(myvals) # Set it such that the maximum value approaches 0
    pk2pk = np.max(myvals) - np.min(myvals)
    myvals -= pk2pk/50.0 # Let's assume we're down about a factor of 50 at the outskirts.
    for i in range(nring):
        #rings[i,(rmat>=edges[i])&(rmat<edges[i+1]=1.0
        if i == nring-1:
            slope=0.0
        else:
            slope = (myvals[i]-myvals[i+1])/(edges[i+1]-edges[i]) # expect positve slope; want negative one.
        rgtinedge = (rmat>=edges[i])
        rfromin = (rmat-edges[i])
        initline = rfromin[rgtinedge]*slope
        if vals[i] != 0:
            rings[i,rgtinedge] = (myvals[i] - initline)/myvals[i] # Should be normalized to 1 now.
        else:
            rings[i,rgtinedge] = 1.0
        rgtoutedge = (rmat>=edges[i+1])
        rings[i,rgtoutedge]=0.0
        myannul = [ c1 and not(c2) for c1,c2 in zip(rgtinedge.ravel(),rgtoutedge.ravel())]
        rannul = rmat.ravel()[myannul]
        # NOTE(review): rmin/rmout are computed but never used below
        rmin = (rmat == np.min(rannul))
        rmout = (rmat == np.max(rannul))
        # convolve the ring with the beam via FFT (circular convolution)
        rings[i,:,:]=np.real(np.fft.ifft2(np.fft.fft2(rings[i,:,:])*srcft))
    return rings
def make_rings(edges,cent,map,pixsize=2.0,fwhm=10.0,amps=None,iswcs=True):
    """Make beam-convolved annular ring templates around cent.

    Ring i is the region edges[i] <= r < edges[i+1] (units set by pixsize),
    convolved via FFT with a Gaussian beam of the given fwhm (or a sum of
    Gaussians when fwhm is a sequence with matching amps).  cent is (ra,dec)
    converted through map.wcs when iswcs=True, else taken directly as pixel
    coordinates.  Returns an array of shape (len(edges)-1, map.nx, map.ny)."""
    # FFT-ordered radius grid for building the (wrap-around) beam map
    xvec=np.arange(map.nx)
    yvec=np.arange(map.ny)
    ix=int(map.nx/2)
    iy=int(map.ny/2)
    xvec[ix:]=xvec[ix:]-map.nx
    yvec[iy:]=yvec[iy:]-map.ny
    #xvec[map.nx/2:]=xvec[map.nx/2:]-map.nx
    #yvec[map.ny/2:]=yvec[map.ny/2:]-map.ny
    xmat=np.repeat([xvec],map.ny,axis=0).transpose()
    ymat=np.repeat([yvec],map.nx,axis=0)
    rmat=np.sqrt(xmat**2+ymat**2)*pixsize
    # beam model: single Gaussian if fwhm is scalar, else a weighted sum
    if isinstance(fwhm,int)|isinstance(fwhm,float):
        sig=fwhm/np.sqrt(8*np.log(2))
        src_map=np.exp(-0.5*rmat**2/sig**2)
        src_map=src_map/src_map.sum()
    else:
        sig=fwhm[0]/np.sqrt(8*np.log(2))
        src_map=np.exp(-0.5*rmat**2/sig**2)*amps[0]
        for i in range(1,len(fwhm)):
            sig=fwhm[i]/np.sqrt(8*np.log(2))
            src_map=src_map+np.exp(-0.5*rmat**2/sig**2)*amps[i]
        src_map=src_map/src_map.sum()
    beam_area=pixsize**2/src_map.max()
    beam_area=beam_area/3600**2/(360**2/np.pi)
    print('beam_area is ',beam_area*1e9,' nsr')
    nring=len(edges)-1
    rings=np.zeros([nring,map.nx,map.ny])
    if iswcs:
        mypix=map.wcs.wcs_world2pix(cent[0],cent[1],1)
    else:
        mypix=cent
    print('mypix is ',mypix)
    xvec=np.arange(map.nx)
    yvec=np.arange(map.ny)
    xmat=np.repeat([xvec],map.ny,axis=0).transpose()
    ymat=np.repeat([yvec],map.nx,axis=0)
    srcft=np.fft.fft2(src_map)
    rmat=np.sqrt( (xmat-mypix[0])**2+(ymat-mypix[1])**2)*pixsize
    for i in range(nring):
        #rings[i,(rmat>=edges[i])&(rmat<edges[i+1]=1.0
        # top-hat annulus, then circular beam convolution via FFT
        rings[i,(rmat>=edges[i])]=1.0
        rings[i,(rmat>=edges[i+1])]=0.0
        rings[i,:,:]=np.real(np.fft.ifft2(np.fft.fft2(rings[i,:,:])*srcft))
    return rings
def find_jumps(dat,width=10,pad=2,thresh=10,rat=0.5):
    """Find jumps (DC-level shifts) in a block of timestreams, preferably with
    the common mode removed.

    Parameters
    ----------
    dat : ndarray (ndet, n)
    width : averaging width in samples when looking for a jump
    pad : length, in units of width, to mask at the beginning/end of each
        timestream (and around each detection)
    thresh : threshold in units of the filtered data's median absolute
        deviation to qualify as a jump
    rat : ratio of the largest neighboring opposite-sign excursion to the
        found jump above which the detection is classified as a spike (an
        opposite-sign neighbor means the filter probably hit a spike)

    Returns
    -------
    jumps : list of length ndet; sorted arrays of jump sample indices, or
        None for detectors with no jumps found.
    """
    n=dat.shape[1]
    ndet=dat.shape[0]
    #make a filter template that is a gaussian with sigma width, sign-flipped in the center
    #so, positive half-gaussian starting from zero, and negative half-gaussian at the end
    x=np.arange(n)
    myfilt=np.exp(-0.5*x**2/width**2)
    myfilt=myfilt-np.exp( (-0.5*(x-n)**2/width**2))
    fac=np.abs(myfilt).sum()/2.0
    myfilt=myfilt/fac
    # apply the (circular) matched filter via FFT
    dat_filt=np.fft.rfft(dat,axis=1)
    myfilt_ft=np.fft.rfft(myfilt)
    dat_filt=dat_filt*np.repeat([myfilt_ft],ndet,axis=0)
    dat_filt=np.fft.irfft(dat_filt,axis=1,n=n)
    dat_filt_org=dat_filt.copy()
    print(dat_filt.shape)
    # mask the edges, where the wrap-around filter response is unreliable
    dat_filt[:,0:pad*width]=0
    dat_filt[:,-pad*width:]=0
    det_thresh=thresh*np.median(np.abs(dat_filt),axis=1)
    dat_dejump=dat.copy()
    jumps=[None]*ndet
    print('have filtered data, now searching for jumps')
    for i in range(ndet):
        while np.max(np.abs(dat_filt[i,:]))>det_thresh[i]:
            ind=np.argmax(np.abs(dat_filt[i,:]))+1 #+1 seems to be the right index to use
            imin=ind-width
            if imin<0:
                imin=0
            imax=ind+width
            if imax>n:
                imax=n
            val=dat_filt[i,ind]
            # largest nearby opposite-sign excursion, used for the spike test
            if val>0:
                val2=np.min(dat_filt[i,imin:imax])
            else:
                val2=np.max(dat_filt[i,imin:imax])
            print('found jump on detector ',i,' at sample ',ind)
            if np.abs(val2/val)>rat:
                print('I think this is a spike due to ratio ',np.abs(val2/val))
            else:
                if jumps[i] is None:
                    jumps[i]=[ind]
                else:
                    jumps[i].append(ind)
            #independent of if we think it is a spike or a jump, zap that stretch of the data
            dat_dejump[i,ind:]=dat_dejump[i,ind:]+dat_filt[i,ind]
            dat_filt[i,ind-pad*width:ind+pad*width]=0
        if not(jumps[i] is None):
            jumps[i]=np.sort(jumps[i])
    #return dat_dejump,jumps,dat_filt_org
    return jumps
def fit_jumps_from_cm(dat,jumps,cm,cm_order=1,poly_order=1):
    """Fit and remove jump amplitudes from each detector timestream.

    For every detector with entries in jumps, a least-squares fit is done with
    a design matrix of Legendre polynomials (order poly_order), the common
    mode cm multiplied by Legendre polynomials (orders 0..cm_order-1), and one
    step function per jump; the best-fit step components are then subtracted.
    Returns the de-jumped copy of dat.
    """
    jump_vals=jumps[:]
    ndet=len(jumps)
    n=dat.shape[1]
    x=np.linspace(-1,1,n)
    m1=np.polynomial.legendre.legvander(x,poly_order)
    m2=np.polynomial.legendre.legvander(x,cm_order-1)
    # modulate the common mode by low-order polynomials (gain drift terms)
    for i in range(cm_order):
        m2[:,i]=m2[:,i]*cm
    mat=np.append(m1,m2,axis=1)
    npp=mat.shape[1]
    dat_dejump=dat.copy()
    for i in range(ndet):
        if not(jumps[i] is None):
            njump=len(jumps[i])
            segs=np.append(jumps[i],n)
            print('working on detector ',i,' who has ', len(jumps[i]),' jumps with segments ',segs)
            # append one step-function column per jump to the shared design matrix
            mm=np.zeros([n,npp+njump])
            mm[:,:npp]=mat
            for j in range(njump):
                mm[segs[j]:segs[j+1],j+npp]=1.0
            lhs=np.dot(mm.transpose(),mm)
            #print lhs
            rhs=np.dot(mm.transpose(),dat[i,:].transpose())
            lhs_inv=np.linalg.inv(lhs)
            fitp=np.dot(lhs_inv,rhs)
            jump_vals[i]=fitp[npp:]
            # subtract only the fitted step components, keeping poly/cm signal
            jump_pred=np.dot(mm[:,npp:],fitp[npp:])
            dat_dejump[i,:]=dat_dejump[i,:]-jump_pred
    return dat_dejump
#for i in range(ndet):
def gapfill_eig(dat,cuts,tod=None,thresh=5.0, niter_eig=3, niter_inner=3, insert_cuts=False):
    """Fill cut (flagged) samples using the dominant eigenmodes of the data.

    Alternates between estimating the detector-detector covariance of the
    gap-filled data and fitting the strongest eigenmodes to predict values
    inside the cut regions.

    Parameters
    ----------
    dat : ndarray (ndet, n)
        Detector data.
    cuts : cuts object
        Cut description; a cleared copy accumulates the gap-fill solution.
    tod : tod object
        Passed through to the cuts' map2tod/tod2map.
    thresh : float
        Keep eigenmodes whose eigenvalue exceeds thresh**2 times the median.
    niter_eig, niter_inner : int
        Outer (covariance) and inner (amplitude-fit) iteration counts.
    insert_cuts : bool
        If True, write the final gap-fill values back into `dat`.

    Returns
    -------
    cuts object holding the fitted values for the cut samples.
    """
    cuts_cur=cuts.copy()
    cuts_cur.clear()
    for eig_ctr in range(niter_eig):
        tmp=dat.copy()
        #insert the current best-guess values into the cut regions
        cuts_cur.map2tod(tod,tmp,do_add=False)
        mycov=np.dot(tmp,tmp.T)
        #mycov is symmetric by construction, so eigh avoids the spurious
        #complex eigenvalues np.linalg.eig can produce from round-off
        ee,vv=np.linalg.eigh(mycov)
        mask=ee>thresh*thresh*np.median(ee)
        neig=np.sum(mask)
        print('working with ' + repr(neig) + ' eigenvectors.')
        ee=ee[mask]
        vv=vv[:,mask]
        uu=np.dot(vv.T,tmp)
        lhs=np.dot(uu,uu.T)
        lhs_inv=np.linalg.inv(lhs)
        for iter_ctr in range(niter_inner):
            #fit mode amplitudes, predict the timestreams, refresh cut samples
            rhs=np.dot(tmp,uu.T)
            fitp=np.dot(lhs_inv,rhs.T)
            pred=np.dot(fitp.T,uu)
            cuts_cur.tod2map(tod,pred,do_add=False)
            cuts_cur.map2tod(tod,tmp,do_add=False)
    if insert_cuts:
        cuts_cur.map2tod(dat)
    return cuts_cur
def __gapfill_eig_poly(dat,cuts,tod=None,npoly=2, thresh=5.0, niter_eig=3, niter_inner=3):
    #Unfinished variant of gapfill_eig that also fits polynomials alongside the
    #eigenmodes.  Deliberately disabled: the assert below always fires.
    assert(1==0) #this code is not yet working. regular gapfill_eig should work since the polys could
                 #be described by SVD, so SVD modes should look like polys iff they would have been important
    ndat=dat.shape[1]
    if npoly>0:
        xvec=np.linspace(-1,1,ndat)
        #NOTE(review): 'x' is undefined here -- presumably should be xvec
        polymat=np.polynomial.legendre.legvander(x,npoly-1)
    old_coeffs=None
    cuts_cur=cuts.copy()
    cuts_cur.clear()
    #NOTE(review): looks like this was meant to be cuts_empty=cuts.copy();
    #as written cuts_empty is undefined
    cuts_empty.cuts.copy()
    cuts_empty.clear()
    for eig_ctr in range(niter_eig):
        tmp=dat.copy()
        cuts_cur.map2tod(tod,tmp,do_add=False) #insert current best-guess solution for the cuts
        if npoly>1: #if we're fitting polynomials as well as eigenmodes, subtract them off before re-estimating the covariance
            if not(old_coeffs is None):
                #NOTE(review): neig is only assigned further down, so this line
                #would fail on the first pass if old_coeffs were ever set
                tmp=tmp-np.dot(polymat,old_coeffs[neig:,:]).T
        mycov=np.dot(tmp,tmp.T)
        #symmetrize the covariance to suppress round-off asymmetry
        mycov=0.5*(mycov+mycov.T)
        ee,vv=np.linalg.eig(mycov)
        #keep only modes well above the typical eigenvalue
        mode_map=ee>thresh*thresh*np.median(ee)
        neig=mode_map.sum()
        mat=np.zeros([ndat,neig+npoly])
        eigs=vv[:,mode_map]
        ts_vecs=np.dot(eigs.T,tmp)
        mat[:,:neig]=ts_vecs.T
        if npoly>0:
            mat[:,neig:]=polymat
        lhs=np.dot(mat.T,mat)
        lhs_inv=np.linalg.inv(lhs)
        #now that we have the vectors we expect to describe our data, do a few rounds
        #of fitting amplitudes to timestream models, subtract that off, assign cuts to zero,
        #and restore the model.
        tmp=dat.copy()
        for inner_ctr in range(niter_inner):
            cuts_cur.map2tod(tod,tmp)
            rhs=np.dot(tmp,mat)
            fitp=np.dot(lhs_inv,rhs.T)
            #NOTE(review): pred is computed but never applied, and the function
            #never returns anything -- the implementation is incomplete.
            pred=np.dot(mat,fitp).T
def get_type(nbyte):
    """Map the byte-size code used in octave struct files to a numpy dtype.

    Positive codes are floats, negative codes are signed ints, 1 means string.
    Unknown codes print a warning and return None.
    """
    lookup={8:'float64',4:'float32',-4:'int32',-8:'int64',1:'str'}
    if nbyte in lookup:
        return np.dtype(lookup[nbyte])
    print('Unsupported nbyte ' + repr(nbyte) + ' in get_type')
    return None
def read_octave_struct(fname):
    """Read a simple octave-struct binary file into a dict of numpy arrays.

    Expected layout: int32 number of keys, then for each key a
    newline-terminated name, int32 ndim, ndim int32 dims, an int32 type code
    (see get_type) and the raw array data.

    Parameters
    ----------
    fname : str
        Path to the file.

    Returns
    -------
    dict
        Mapping from key name (str) to numpy array; dims are reversed
        relative to the on-disk (octave) order.
    """
    dat={}
    #open in binary mode inside a with-block so np.fromfile and readline agree
    #on byte offsets and the handle is closed even on error
    with open(fname,'rb') as f:
        nkey=np.fromfile(f,'int32',1)[0]
        for i in range(nkey):
            key=f.readline().strip().decode()
            ndim=np.fromfile(f,'int32',1)[0]
            dims=np.fromfile(f,'int32',ndim)
            #octave writes dims fastest-first; flip for C ordering
            dims=np.flipud(dims)
            nbyte=np.fromfile(f,'int32',1)[0]
            dtype=get_type(nbyte)
            tmp=np.fromfile(f,dtype,dims.prod())
            dat[key]=np.reshape(tmp,dims)
    return dat
def nsphere_vol(npp):
    """Return the volume of the unit n-ball in npp dimensions.

    Uses V_n = pi^(n/2) / (n/2)!  for even n and
    V_n = 2^((n+1)/2) * pi^((n-1)/2) / n!!  for odd n.
    """
    if npp%2==0:
        #integer floor-division keeps the half-dimension an exact int
        nn=npp//2
        vol=(np.pi**nn)/np.prod(np.arange(1,nn+1))
    else:
        nn=(npp-1)//2
        #double factorial via the stride-2 arange
        vol=2**(nn+1)*np.pi**nn/np.prod(np.arange(1,npp+1,2))
    return vol
def _prime_loop(ln,lp,icur,lcur,vals):
facs=np.arange(lcur,ln+1e-3,lp[0])
if len(lp)==1:
nfac=len(facs)
if (nfac>0):
vals[icur:(icur+nfac)]=facs
icur=icur+nfac
#print 2**vals[:icur]
else:
print('bad facs came from ' + repr([2**lcur,2**ln,2**lp[0]]))
#print icur
return icur
else:
facs=np.arange(lcur,ln,lp[0])
for fac in facs:
icur=_prime_loop(ln,lp[1:],icur,fac,vals)
return icur
print('I don''t think I should have gotten here.')
return icur
def find_good_fft_lens(n,primes=[2,3,5,7]):
    """Return all integers <= n whose prime factors are drawn from `primes`.

    Useful for picking FFT-friendly transform lengths.  Enumeration happens in
    log2 space via _prime_loop.

    Parameters
    ----------
    n : int
        Upper limit on the returned lengths.
    primes : list of int
        Allowed prime factors (default [2,3,5,7]; never mutated).

    Returns
    -------
    ndarray of int
        Sorted array of the allowed lengths.
    """
    npr=len(primes)
    r=np.log2(n+0.5)
    lp=np.log2(primes)
    #upper bound on the number of lengths: the positive-orthant volume of the
    #n-sphere scaled into the log-space box, plus slack for small n
    int_max=int((nsphere_vol(npr)/2**npr)*np.prod(r/lp)+30)
    vals=np.zeros(int_max)
    icur=_prime_loop(r,lp,0,0.0,vals)
    assert(icur<=int_max)
    return np.sort(np.asarray(np.round(2**vals[:icur]),dtype='int'))
def _linfit_2mat(dat,mat1,mat2):
np1=mat1.shape[1]
np2=mat2.shape[1]
mm=np.append(mat1,mat2,axis=1)
lhs=np.dot(mm.transpose(),mm)
rhs=np.dot(mm.transpose(),dat)
lhs_inv=np.linalg.inv(lhs)
fitp=np.dot(lhs_inv,rhs)
fitp1=fitp[0:np1].copy()
fitp2=fitp[np1:].copy()
assert(len(fitp2)==np2)
return fitp1,fitp2
def fit_mat_vecs_poly_nonoise(dat,mat,order,cm_order=None):
    """Jointly fit a common-mode Legendre polynomial plus a polynomial-modulated
    per-detector template `mat` to `dat`, ignoring noise weighting.

    Returns (pred, cm_fitp, mat_fitp, polys): the model prediction, the
    common-mode coefficients, the template-modulation coefficients, and the
    Legendre basis used for the template part.
    """
    if cm_order is None:
        cm_order=order
    nsamp=dat.shape[1]
    ndet=dat.shape[0]
    xx=np.linspace(-1,1,nsamp)
    polys=np.polynomial.legendre.legvander(xx,order).transpose()
    cm_polys=np.polynomial.legendre.legvander(xx,cm_order).transpose()
    #data summed over detectors, plain and template-weighted
    v1=np.sum(dat,axis=0)
    v2=np.sum(dat*mat,axis=0)
    rhs=np.append(np.dot(cm_polys,v1),np.dot(polys,v2))
    #reduced design: the common-mode block scales with ndet, the template block
    #with the detector-summed template
    templ_sum=np.sum(mat,axis=0)
    A=np.append(cm_polys*ndet,polys*np.repeat([templ_sum],order+1,axis=0),axis=0)
    fitp=np.dot(np.linalg.inv(np.dot(A,A.transpose())),rhs)
    cm_fitp=fitp[:cm_order+1]
    mat_fitp=fitp[cm_order+1:]
    assert(len(mat_fitp)==(order+1))
    cm_pred=np.dot(cm_fitp,cm_polys)
    mat_pred=np.repeat([np.dot(mat_fitp,polys)],ndet,axis=0)*mat
    return cm_pred+mat_pred,cm_fitp,mat_fitp,polys
def smooth_spectra(spec,fwhm):
    """Smooth each row of `spec` with a Gaussian of the given FWHM using r2r FFTs.

    Returns the rows still in the transform domain along with the transformed
    kernel (the inverse transform is intentionally left to the caller).
    """
    x=np.arange(spec.shape[1])
    width=fwhm/np.sqrt(8*np.log(2))
    kernel=np.exp(-0.5*(x/width)**2)
    #r2r normalization: endpoints count once, interior points twice
    kernel=kernel/(kernel[0]+kernel[-1]+2*kernel[1:-1].sum())
    kernel_ft=mkfftw.fft_r2r(kernel)
    spec_ft=mkfftw.fft_r2r(spec)
    #apply the kernel to every row at once via broadcasting
    spec_ft=spec_ft*kernel_ft[np.newaxis,:]
    return spec_ft,kernel_ft
def smooth_many_vecs(vecs,fwhm=20):
    """Gaussian-smooth each row of `vecs` via r2r FFTs and return the smoothed rows."""
    nsamp=vecs.shape[1]
    x=np.arange(nsamp)
    width=fwhm/np.sqrt(8*np.log(2))
    kernel=np.exp(-0.5*(x/width)**2)
    #r2r normalization: endpoints count once, interior points twice
    kernel=kernel/(kernel[0]+kernel[-1]+2*kernel[1:-1].sum())
    kernel_ft=mkfftw.fft_r2r(kernel)
    #apply the kernel to every row at once via broadcasting
    xtrans=mkfftw.fft_r2r(vecs)*kernel_ft[np.newaxis,:]
    #forward+forward r2r picks up a factor of 2*(n-1)
    return mkfftw.fft_r2r(xtrans)/(2*(nsamp-1))
def smooth_vec(vec,fwhm=20):
    """Gaussian-smooth a single vector via r2r FFTs and return the smoothed vector."""
    nsamp=vec.size
    x=np.arange(nsamp)
    width=fwhm/np.sqrt(8*np.log(2))
    kernel=np.exp(-0.5*(x/width)**2)
    #r2r normalization: endpoints count once, interior points twice
    kernel=kernel/(kernel[0]+kernel[-1]+2*kernel[1:-1].sum())
    kernel_ft=mkfftw.fft_r2r(kernel)
    xtrans=mkfftw.fft_r2r(vec)
    #forward+forward r2r picks up a factor of 2*(n-1)
    return mkfftw.fft_r2r(xtrans*kernel_ft)/2.0/(nsamp-1)
def fit_cm_plus_poly(dat,ord=2,cm_ord=1,niter=2,medsub=False,full_out=False):
    """Iteratively fit and remove a per-detector Legendre drift plus a
    polynomial-modulated common mode from detector data.

    Parameters
    ----------
    dat : ndarray (ndet, n)
        Detector data.
    ord : int
        Order of the per-detector Legendre drift polynomial.
    cm_ord : int
        Number of polynomial terms modulating the common mode.
    niter : int
        Number of common-mode re-estimation passes (needs >=2 for any fit).
    medsub : bool
        If True, subtract each detector's median first.
    full_out : bool
        If True, also return the fitted common-mode signal and the template.

    Returns
    -------
    dd : ndarray
        Data with the per-detector polynomial removed; with full_out also
        (pred2, cm), the common-mode prediction and the common-mode estimate.
    """
    nsamp=dat.shape[1]
    ndet=dat.shape[0]
    if medsub:
        med=np.median(dat,axis=1)
        dat=dat-np.repeat([med],nsamp,axis=0).transpose()
    #sample coordinate scaled to [-1,1]
    xx=np.arange(nsamp)+0.0
    xx=xx-xx.mean()
    xx=xx/xx.max()
    pmat=np.polynomial.legendre.legvander(xx,ord)
    cm_pmat=np.polynomial.legendre.legvander(xx,cm_ord-1)
    calfacs=np.ones(ndet)*1.0
    dd=dat.copy()
    for i in range(1,niter):
        for j in range(ndet):
            dd[j,:]/=calfacs[j]
        #common mode is the median across detectors of the rescaled data
        cm=np.median(dd,axis=0)
        cm_mat=np.zeros(cm_pmat.shape)
        for k in range(cm_mat.shape[1]):
            cm_mat[:,k]=cm_pmat[:,k]*cm
        fitp_p,fitp_cm=_linfit_2mat(dat.transpose(),pmat,cm_mat)
        pred1=np.dot(pmat,fitp_p).transpose()
        pred2=np.dot(cm_mat,fitp_cm).transpose()
        #only the per-detector drift is removed; the common mode stays in
        dd=dat-pred1
    if full_out:
        return dd,pred2,cm #if requested, return the modelled CM as well
    return dd
def run_pcg(b,x0,tods,precon=None,maxiter=25,outroot='map',save_iters=[-1],save_ind=0,save_tail='.fits',plot_iters=[],plot_info=None,plot_ind=0):
    """
    Function which runs preconditioned conjugate gradient on a bundle of tods to generate a map.
    PCG iteratively approximates the solution to the linear equation Ax = b for A a matrix, x
    and b vectors. In the map making equation, A = P'N"P and b = P'N"d for d the vector of TODs,
    N the noise matrix, P the tod to map pointing matrix, i.e. a matrix that specifies which
    pixel in the map was observed by each TOD data point. Further x is the map.

    Arguments:

        b: The rhs of the equation. In our case this is P'N''d. The tod class has a built in
        method for computing this.

        x0: The initial guess. Generally set to zero for the first iteration and then to the output
        of the previous iteration.

        tods: the input tods we want to make into maps. Note the noise has already been estimated and is within the tod object.

        precon: The preconditioner. A matrix applied to A to ensure faster convergence. 1/hitsmap
        is a frequent selection.

        maxiter: Maximum number of iterations to perform.

        outroot: location at which to save the output map

        save_iters: The iterations at which to save the result map.
        NOTE(review): the default [-1] never matches since iter runs 0..maxiter-1.

        save_ind: index of the map (within x.maps) written at save_iters

        save_tail: Extension for saving the output maps

        plot_iters: Which iterations to plot

        plot_info: options forwarded to the map's plot() method

        plot_ind: index of the map (within x.maps) that gets plotted

    Outputs:

        x: best guess for x after the convergence criteria has been reached (either max iter or
        Ax = b close enough to 0
    """
    t1=time.time()
    Ax=tods.dot(x0)
    try:
        #compute the remainder r_0
        r=b.copy()
        r.axpy(Ax,-1)
    except:
        #fall back to plain arithmetic if the map class lacks copy/axpy
        r=b-Ax
    if not(precon is None):
        #print('applying precon')
        # z_0 = M*r_0
        z=precon*r
        #NOTE(review): 'key' is never used below -- looks like leftover debug code
        key = tods.tods[0].info['fname']
    else:
        z=r.copy()
    #Initial p_0 = z_0 = M*r_0
    p=z.copy()
    #NOTE(review): k is never used
    k=0.0
    #compute z*r, which is used for computing alpha
    zr=r.dot(z)
    #make a copy of our initial guess
    x=x0.copy()
    t2=time.time()
    nsamp=tods.get_nsamp()
    tloop=time.time()
    for iter in range(maxiter):
        #rank-0 progress report: residual, step size, and timing/throughput
        if myrank==0:
            if iter>0:
                print(iter,zr,alpha,t2-t1,t3-t2,t3-t1,nsamp/(t2-t1)/1e6)
            else:
                print(iter,zr,t2-t1)
        t1=time.time()
        #Compute pAp
        Ap=tods.dot(p)
        t2=time.time()
        pAp=p.dot(Ap)
        #Compute alpha_k
        alpha=zr/pAp
        #print('alpha,pAp, and zr are ' + repr(alpha) + ' ' + repr(pAp) + ' ' + repr(zr))
        try:
            #Update guess using alpha
            x_new=x.copy()
            x_new.axpy(p,alpha)
        except:
            x_new=x+p*alpha
        try:
            #Write down next remainder r_k+1
            r_new=r.copy()
            r_new.axpy(Ap,-alpha)
        except:
            r_new=r-Ap*alpha
        if not(precon is None):
            #print('applying precon')
            z_new=precon*r_new
        else:
            z_new=r_new.copy()
        #compute new z_k+1
        zr_new=r_new.dot(z_new)
        #compute beta_k, which is used to compute p_k+1
        beta=zr_new/zr
        try:
            #compute new p_k+1
            p_new=z_new.copy()
            p_new.axpy(p,beta)
        except:
            p_new=z_new+p*beta
        #Update values
        p=p_new
        z=z_new
        r=r_new
        zr=zr_new
        x=x_new
        t3=time.time()
        if iter in save_iters:
            if myrank==0:
                x.maps[save_ind].write(outroot+'_'+repr(iter)+save_tail)
        if iter in plot_iters:
            print('plotting on iteration ',iter)
            x.maps[plot_ind].plot(plot_info)
    tave=(time.time()-tloop)/maxiter
    print('average time per iteration was ',tave,' with effective throughput ',nsamp/tave/1e6,' Msamp/s')
    #NOTE(review): this re-checks the last loop value of iter, duplicating the
    #in-loop plotting block above (possibly plotting the final map a second time)
    if iter in plot_iters:
        print('plotting on iteration ',iter)
        x.maps[plot_ind].plot(plot_info)
    else:
        print('skipping plotting on iter ',iter)
    return x
def run_pcg_wprior(b,x0,tods,prior=None,precon=None,maxiter=25,outroot='map',save_iters=[-1],save_ind=0,save_tail='.fits'):
    #least squares equations in the presence of a prior - chi^2 = (d-Am)^T N^-1 (d-Am) + (p-m)^T Q^-1 (p-m)
    #where p is the prior target for parameters, and Q is the variance. The ensuing equations are
    #(A^T N-1 A + Q^-1)m = A^T N^-1 d + Q^-1 p. For non-zero p, it is assumed you have done this already and that
    #b=A^T N^-1 d + Q^-1 p
    #to have a prior then, whenever we call Ax, just a Q^-1 x to Ax.
    #(See run_pcg for the documentation of the shared arguments; `prior` must
    #provide apply_prior(x, Ax) which adds Q^-1 x into Ax in place.)
    t1=time.time()
    Ax=tods.dot(x0)
    if not(prior is None):
        #print('applying prior')
        prior.apply_prior(x0,Ax)
    try:
        #r_0 = b - A x_0; fall back to plain arithmetic if copy/axpy are absent
        r=b.copy()
        r.axpy(Ax,-1)
    except:
        r=b-Ax
    if not(precon is None):
        z=precon*r
    else:
        z=r.copy()
    p=z.copy()
    #NOTE(review): k is never used
    k=0.0
    zr=r.dot(z)
    x=x0.copy()
    t2=time.time()
    for iter in range(maxiter):
        #rank-0 progress report
        if myrank==0:
            if iter>0:
                print(iter,zr,alpha,t2-t1,t3-t2,t3-t1)
            else:
                print(iter,zr,t2-t1)
            sys.stdout.flush()
        t1=time.time()
        Ap=tods.dot(p)
        if not(prior is None):
            #print('applying prior')
            prior.apply_prior(p,Ap)
        t2=time.time()
        pAp=p.dot(Ap)
        alpha=zr/pAp
        try:
            #x_{k+1} = x_k + alpha p_k
            x_new=x.copy()
            x_new.axpy(p,alpha)
        except:
            x_new=x+p*alpha
        try:
            #r_{k+1} = r_k - alpha A p_k
            r_new=r.copy()
            r_new.axpy(Ap,-alpha)
        except:
            r_new=r-Ap*alpha
        if not(precon is None):
            z_new=precon*r_new
        else:
            z_new=r_new.copy()
        zr_new=r_new.dot(z_new)
        #beta_k for the next search direction
        beta=zr_new/zr
        try:
            p_new=z_new.copy()
            p_new.axpy(p,beta)
        except:
            p_new=z_new+p*beta
        #roll the state forward for the next iteration
        p=p_new
        z=z_new
        r=r_new
        zr=zr_new
        x=x_new
        t3=time.time()
        if iter in save_iters:
            if myrank==0:
                x.maps[save_ind].write(outroot+'_'+repr(iter)+save_tail)
    return x
def apply_noise(tod,dat=None):
    """Apply the tod's stored noise model (detector rotation plus Fourier-domain
    weights) to data, returning N^-1 d.

    If `dat` is not supplied, a copy of the tod's own data is used.
    """
    if dat is None:
        #dat=tod['dat_calib']
        dat=tod.get_data().copy()
    #rotate into the detector eigenbasis stored in tod['v']
    rot=np.dot(tod['v'],dat)
    ft=mkfftw.fft_r2r(rot)
    #apply the per-mode Fourier weights
    ft=ft*tod['mywt'][:,0:ft.shape[1]]
    rot=mkfftw.fft_r2r(ft)
    out=np.dot(tod['v'].transpose(),rot)
    #for fft_r2r, the first/last samples get counted for half of the interior ones,
    #so divide them by 2 in the post-filtering. Makes symmetry much happier...
    out[:,0]=out[:,0]*0.5
    out[:,-1]=out[:,-1]*0.5
    return out
def get_grad_mask_2d(map,todvec=None,thresh=4.0,noisemap=None,hitsmap=None):
    """Estimate the per-pixel gradient of a map, zeroing pixels whose gradient
    is consistent with the expected map noise.

    The gradient is the RMS of differences with the four nearest neighbours
    (periodic at the edges via np.roll).  A pixel survives only where that
    gradient exceeds `thresh` times the rough per-pixel timestream noise
    (noise map scaled by sqrt(hits)).  Missing noise/hits maps are built from
    `todvec` with make_hits.
    """
    if noisemap is None:
        noisemap=make_hits(todvec,map,do_weights=True)
        noisemap.invert()
        noisemap.map=np.sqrt(noisemap.map)
    if hitsmap is None:
        hitsmap=make_hits(todvec,map,do_weights=False)
    #squared differences with the four nearest neighbours, in a fixed order
    grad2=(map.map-np.roll(map.map,1,axis=0))**2
    for axis,shift in ((0,-1),(1,-1),(1,1)):
        grad2=grad2+(map.map-np.roll(map.map,shift,axis=axis))**2
    grad=np.sqrt(0.25*grad2)
    #typical timestream noise in a pixel: noise map times sqrt(hits)
    covered=hitsmap.map>0
    pixnoise=noisemap.map.copy()
    pixnoise[covered]=pixnoise[covered]*np.sqrt(hitsmap.map[covered])
    keep=(grad>(thresh*pixnoise))
    frac=1.0*np.sum(keep)/keep.size
    print("Cutting " + repr(frac*100) + "% of map pixels in get_grad_mask_2d.")
    grad[np.logical_not(keep)]=0
    return grad
class null_precon:
    """Do-nothing preconditioner: adding it to or multiplying it with a value
    simply returns that value unchanged."""
    def __init__(self):
        #flag so callers can detect the dummy preconditioner
        self.isnull=True
    def __add__(self,val):
        return val
    def __mul__(self,val):
        return val
def scaled_airmass_from_el(mat):
    """Return a zero-mean airmass estimate (1/cos of elevation) per sample.

    Normalizing by the standard deviation is intentionally left out so the
    amplitude stays in natural airmass units.
    """
    airmass=1/np.cos(mat)
    return airmass-airmass.mean()
class tsGeneric:
    """Base class for timestream models.

    Keeps the fit parameters in self.params and supplies the vector-space
    operations (dot, axpy, multiply, copy) the PCG machinery relies on.
    """
    def __init__(self,tod=None):
        #remember which tod this model belongs to
        self.fname=tod.info['fname']
    def __mul__(self,to_mul):
        #elementwise product of parameters, returned as a new instance
        out=self.copy()
        out.params=self.params*to_mul.params
        return out
    def clear(self):
        #reset all parameters to zero in place
        self.params[:]=0
    def dot(self,common=None):
        #inner product of parameters; with no argument, the self inner product
        other=self if common is None else common
        return np.sum(self.params*other.params)
    def axpy(self,common,a):
        #self.params += a * common.params
        self.params=self.params+a*common.params
    def apply_prior(self,x,Ax):
        #accumulate the prior term: Ax += Q^-1 x, with self.params holding Q^-1
        Ax.params=Ax.params+self.params*x.params
    def copy(self):
        return copy.deepcopy(self)
    def write(self,fname=None):
        #generic timestreams have nothing to write to disk
        pass
class tsVecs(tsGeneric):
    """Timestream model that is a linear combination of fixed template vectors.

    self.vecs holds nvec templates of length ndata shared by all detectors;
    self.params holds per-detector amplitudes (nvec x ndet).  map2tod
    evaluates params.T @ vecs into data; tod2map applies the transpose.
    """
    def __init__(self,tod,vecs):
        """
        Parameters
        ----------
        tod: tod object
            tod corresponding to the timestream (used for its dimensions)
        vecs: nvec x ndata matrix
            template vectors the fit parameters multiply
        """
        self.vecs=vecs
        self.ndet=tod.get_data_dims()[0]
        self.nvec=vecs.shape[0]
        self.params=np.zeros([self.nvec,self.ndet])
    def tod2map(self,tod,mat=None,do_add=True,do_omp=False):
        """Project data onto the templates: params (+)= vecs @ mat.T.

        `mat` defaults to the tod's data.  With do_add=False the previous
        parameters are overwritten.  do_omp is accepted for interface
        compatibility and ignored.
        """
        if mat is None:
            mat=tod.get_data()
        proj=np.dot(self.vecs,mat.T)
        if do_add:
            self.params[:]=self.params[:]+proj
        else:
            self.params[:]=proj
    def map2tod(self,tod,mat=None,do_add=True,do_omp=False):
        """Evaluate the model into the data: mat (+)= params.T @ vecs.

        `mat` defaults to the tod's data.  With do_add=False the data is
        overwritten instead of accumulated.  do_omp is ignored.
        """
        if mat is None:
            mat=tod.get_data()
        pred=np.dot(self.params.T,self.vecs)
        if do_add:
            mat[:]=mat[:]+pred
        else:
            mat[:]=pred
class tsNotch(tsGeneric):
    """Timestream model of sine/cosine templates spanning a frequency band,
    used to notch the band [numin, numax] out of the data.

    NOTE(review): templates are cos(t*nu)/sin(t*nu) with no 2*pi factor, so
    numin/numax behave as angular frequencies -- confirm with callers.
    """
    def __init__(self,tod,numin,numax):
        self.fname=tod.info['fname']
        tvec=tod.get_tvec()
        span=tvec[-1]-tvec[0]
        #band width over the tod's frequency resolution; the factor of 2
        #accounts for partial waves
        nfreq=int(np.ceil(2*(numax-numin)*span))
        self.freqs=np.linspace(numin,numax,nfreq)
        self.nfreq=nfreq
        #a cos and a sin amplitude per frequency, per detector
        self.params=np.zeros([2*nfreq,tod.get_ndet()])
    def get_vecs(self,tvec):
        #build the cos/sin template pairs, time referenced to the first sample
        tt=tvec-tvec[0]
        vecs=np.zeros([2*self.nfreq,len(tt)])
        for i in range(self.nfreq):
            vecs[2*i,:]=np.cos(tt*self.freqs[i])
            vecs[2*i+1,:]=np.sin(tt*self.freqs[i])
        return vecs
    def map2tod(self,tod,mat=None,do_add=True,do_omp=False):
        #evaluate params.T @ vecs into the data
        pred=self.params.T@self.get_vecs(tod.get_tvec())
        if mat is None:
            mat=tod.get_data()
        if do_add:
            mat[:]=mat[:]+pred
        else:
            mat[:]=pred
    def tod2map(self,tod,mat=None,do_add=True,do_omp=False):
        #project the data onto the cos/sin templates
        if mat is None:
            mat=tod.get_data()
        proj=self.get_vecs(tod.get_tvec())@mat.T
        if do_add:
            self.params[:]=self.params[:]+proj
        else:
            self.params[:]=proj
class tsPoly(tsVecs):
    """Per-detector Legendre-polynomial drift model.

    A tsVecs whose templates are the Legendre polynomials of degree 0..order
    evaluated on an evenly spaced grid over [-1, 1] across the tod.
    """
    def __init__(self,tod,order=10):
        """
        Parameters
        ----------
        tod: tod object
            tod corresponding to the timestream
        order: int
            highest Legendre degree to fit
        """
        self.fname=tod.info['fname']
        dims=tod.get_data_dims()
        self.ndet=dims[0]
        self.ndata=dims[1]
        self.order=order
        #rows are the Legendre polynomials sampled across the tod
        grid=np.linspace(-1,1,self.ndata)
        self.vecs=(np.polynomial.legendre.legvander(grid,order).T).copy()
        self.nvec=self.vecs.shape[0]
        self.params=np.zeros([self.nvec,self.ndet])
class tsBowl(tsVecs):
    """
    Timestream model for elevation-dependent "bowling".

    Mustang 2 maps show radial gradients thought to come from an
    elevation-dependent atmospheric signal; as the sky rotates about the
    target, an elevation gradient becomes a radial one in the map.  This class
    fits Legendre polynomials in the elevation-relative-to-source coordinate
    (apix) to each detector so the effect can be removed within the framework.

    Attributes
    ----------
    fname: str
        name of the tod
    apix: ndarray
        elevation relative to the source, normalized toward [-1, 1]
    vecs: ndarray
        Legendre polynomials evaluated at self.apix
    ndet, ndata: int
        number of detectors / samples in the timestream
    nvec: int
        number of fit polynomials
    params: ndarray, ndet x nvec
        fit parameters for the vecs
    """
    def __init__(self, tod, order=3):
        """
        Build the per-detector Legendre basis in apix.

        Parameters
        ----------
        tod: tod object
            tod corresponding to the timestream
        order: int
            order of the Legendre polynomial fit to the tod elevation
        """
        self.fname=tod.info['fname']
        self.order = order
        dims=tod.get_data_dims()
        self.ndet=dims[0]
        self.ndata=dims[1]
        try:
            #apix is the elevation relative to the source; reuse it if the tod
            #has already computed it, otherwise compute and cache it
            self.apix = tod.info['apix']
        except KeyError:
            tod.set_apix()
            self.apix = tod.info['apix']
        #Normalize apix toward [-1,1] to preserve linear independence of the
        #Legendre polynomials.
        #NOTE(review): axis=0 normalizes across detectors per sample -- confirm
        #this is intended rather than a per-detector (axis=1) normalization.
        self.apix /= np.max(np.abs(self.apix), axis = 0)
        #TODO: swap legvander to legval
        #Legendre polynomials of degree 0..order evaluated at self.apix
        self.vecs=(np.polynomial.legendre.legvander(self.apix,order)).copy()
        self.nvec=self.vecs.shape[-1]
        #Parameters c_ij for the Legendre polynomials
        self.params=np.zeros([self.ndet,self.nvec])
    def map2tod(self, tod, mat = None, do_add = True, do_omp = False):
        """
        Evaluate the bowling model into the data via map2todbowl.

        Parameters
        ----------
        tod: tod object
            source of the data when `mat` is not given
        mat: ndarray, optional
            data array to fill; defaults to the tod's data
        do_add: bool
            if True add the prediction to `mat`, otherwise overwrite it
        do_omp: bool
            dummy, kept for interface compatibility
        """
        if mat is None:
            mat=tod.get_data()
        if do_add:
            mat[:]=mat[:] + map2todbowl(self.vecs, self.params)
        else:
            mat[:]=map2todbowl(self.vecs, self.params)
    def tod2map(self, tod, mat = None, do_add = True, do_omp = False):
        """
        Project data onto the bowling basis via tod2mapbowl (transpose of map2tod).

        Parameters
        ----------
        tod: tod object
            source of the data when `mat` is not given
        mat: ndarray, optional
            data to project; defaults to the tod's data
        do_add: bool
            if True accumulate into params, otherwise overwrite them
        do_omp: bool
            dummy, kept for interface compatibility
        """
        if mat is None:
            mat = tod.get_data()
        if do_add:
            self.params = self.params + tod2mapbowl(self.vecs, mat)
        else:
            #bug fix: this branch previously assigned to self.paras (a typo),
            #so do_add=False silently left the old parameters untouched
            self.params = tod2mapbowl(self.vecs, mat)
    def fit_apix(self, tod):
        #Directly fit the Legendre coefficients per detector.
        #NOTE(review): self.drift is never set in this class, so this method
        #raises AttributeError unless drift is attached externally -- confirm.
        if tod.info['fname'] != self.fname:
            print('Error: bowling fitting can only be performed with the tod used to initialize this timestream; {}'.format(tod.info['fname']))
            return
        for i in range(self.ndet):
            self.params[i,...] = np.polynomial.legendre.legfit(self.apix[i], tod.info['dat_calib'][i] - self.drift[i], self.order)
def partition_interval(start,stop,seg_len=100,round_up=False):
    """Split [start, stop) into roughly equal segments of ~seg_len samples.

    Returns the segment boundaries as an int array beginning at `start` and
    ending exactly at `stop`.  With round_up=True the segment count is rounded
    up (shorter segments), otherwise down (longer segments).
    """
    #behave correctly if the interval is shorter than the desired segment
    if (stop-start)<=seg_len:
        return np.asarray([start,stop],dtype='int')
    nseg=(stop-start)//seg_len
    if nseg*seg_len<(stop-start):
        if round_up:
            nseg=nseg+1
    seg_len=(stop-start)//nseg
    nextra=(stop-start)-seg_len*nseg
    #bug fix: build exactly nseg+1 boundaries from nseg.  The previous
    #np.arange(start,stop+1,seg_len) produced too many points (overshooting
    #stop) when round_up shrank seg_len below (stop-start)/nseg.
    inds=start+seg_len*np.arange(nseg+1)
    if nextra>0:
        #stretch the first nextra segments by one sample to absorb the remainder
        vec=np.zeros(len(inds),dtype='int')
        vec[1:nextra+1]=1
        inds=inds+np.cumsum(vec)
    return inds
def split_partitioned_vec(start,stop,breaks=(),seg_len=100):
    """Partition [start, stop) into ~seg_len segments with forced boundaries
    at each entry of `breaks`.

    Each stretch between consecutive breaks is partitioned independently with
    partition_interval; interior duplicate boundaries are dropped so the
    result is one monotone array of segment edges.

    The default for `breaks` is an immutable tuple (rather than the mutable
    [] it used to be) to avoid the shared-default-argument pitfall; any
    sequence or array of indices is accepted.
    """
    if len(breaks)==0:
        return partition_interval(start,stop,seg_len)
    #drop breaks that coincide with the interval edges
    if breaks[0]==start:
        breaks=breaks[1:]
    if len(breaks)==0:
        return partition_interval(start,stop,seg_len)
    if breaks[-1]==stop:
        breaks=breaks[:-1]
    if len(breaks)==0:
        return partition_interval(start,stop,seg_len)
    breaks=np.hstack([start,breaks,stop])
    nseg=len(breaks)-1
    segs=[None]*(nseg)
    for i in range(nseg):
        inds=partition_interval(breaks[i],breaks[i+1],seg_len)
        if i<(nseg-1):
            #drop the trailing edge; the next stretch re-supplies it
            inds=inds[:-1]
        segs[i]=inds
    return np.hstack(segs)
#breaks,stop,start=0,seg_len=100)
class tsStripes(tsGeneric):
    """Destriper timestream model: one offset per short data segment.

    Segments are roughly seg_len samples long, with boundaries forced wherever
    the time vector jumps by more than tthresh median sample intervals.
    """
    def __init__(self,tod,seg_len=500,do_slope=False,tthresh=10):
        dims=tod.get_data_dims()
        tvec=tod.get_tvec()
        dt=np.median(np.diff(tvec))
        #force segment boundaries at gaps in the time vector
        gaps=np.where(np.abs(np.diff(tvec))>tthresh*dt)[0]
        self.inds=split_partitioned_vec(0,dims[1],gaps,seg_len)
        self.splits=gaps
        self.nseg=len(self.inds)-1
        self.params=np.zeros([dims[0],self.nseg])
    def tod2map(self,tod,dat=None,do_add=True,do_omp=False):
        #accumulate data into per-segment offsets
        if dat is None:
            print('need dat in tod2map destriper')
            return
        minkasi_nb.tod2map_destriped(dat,self.params,self.inds,do_add)
    def map2tod(self,tod,dat=None,do_add=True,do_omp=False):
        #evaluate per-segment offsets into the data
        if dat is None:
            print('need dat in map2tod destriper')
            return
        minkasi_nb.map2tod_destriped(dat,self.params,self.inds,do_add)
    def copy(self):
        return copy.deepcopy(self)
    def set_prior_from_corr(self,corrvec,thresh=0.5):
        """Set the Fourier-domain inverse-covariance prior from a stripe
        correlation estimate, flooring small spectral values at thresh*median
        so the prior stays invertible."""
        assert(corrvec.shape[0]==self.params.shape[0])
        nn=self.params.shape[1]
        corrft=mkfftw.fft_r2r(corrvec[:,:nn].copy())
        if thresh>0:
            for det in range(corrft.shape[0]):
                floor=thresh*np.median(corrft[det,:])
                low=corrft[det,:]<floor
                corrft[det,low]=floor
        self.params=1.0/corrft/(2*(nn-1))
    def apply_prior(self,x,Ax):
        #Ax += F(Q^-1 F(x)), with the prior held in the r2r Fourier domain
        xft=mkfftw.fft_r2r(x.params)
        Ax.params=Ax.params+mkfftw.fft_r2r(xft*self.params)
class tsBinnedAz(tsGeneric):
    """Per-tod azimuth-binned signal model (e.g. ground pickup), with one set
    of azimuth bins per detector."""
    def __init__(self,tod,lims=[0,2*np.pi],nbin=360):
        self.params=np.zeros([tod.get_ndet(),nbin])
        self.lims=[lims[0],lims[1]]
        self.nbin=nbin
    def map2tod(self,tod,dat=None,do_add=True,do_omp=False):
        #evaluate the binned signal at each sample's azimuth
        if dat is None:
            dat=tod.get_data()
        minkasi_nb.map2tod_binned_det(dat,self.params,tod.info['az'],self.lims,self.nbin,do_add)
    def tod2map(self,tod,dat=None,do_add=True,do_omp=False):
        #accumulate data into azimuth bins
        if dat is None:
            dat=tod.get_data()
        minkasi_nb.tod2map_binned_det(dat,self.params,tod.info['az'],self.lims,self.nbin,do_add)
class tsBinnedAzShared(tsGeneric):
    """Azimuth-binned signal with parameters shared among the TODs using this
    instance -- useful when the ground pickup can be assumed constant across
    them."""
    def __init__(self,ndet=2,lims=[0,2*np.pi],nbin=360):
        self.params=np.zeros([ndet,nbin])
        self.lims=[lims[0],lims[1]]
        self.nbin=nbin
    def map2tod(self,tod,dat=None,do_add=True,do_omp=False):
        #evaluate the binned signal at each sample's azimuth
        if dat is None:
            dat=tod.get_data()
        minkasi_nb.map2tod_binned_det(dat,self.params,tod.info['az'],self.lims,self.nbin,do_add)
    def tod2map(self,tod,dat=None,do_add=True,do_omp=False):
        #accumulate data into azimuth bins (leftover debug prints removed)
        if dat is None:
            dat=tod.get_data()
        minkasi_nb.tod2map_binned_det(dat,self.params,tod.info['az'],self.lims,self.nbin,do_add)
class tsDetAz(tsGeneric):
    """Per-detector polynomial in azimuth, built on a Chebyshev recursion with
    the constant term dropped (it is degenerate with the common mode)."""
    def __init__(self,tod,npoly=4):
        if isinstance(tod,tsDetAz):
            #copy-construct from an existing instance (e.g. via copy())
            self.fname=tod.fname
            self.az=tod.az
            self.azmin=tod.azmin
            self.azmax=tod.azmax
            self.npoly=tod.npoly
            self.ndet=tod.ndet
        else:
            self.fname=tod.info['fname']
            self.az=tod.info['AZ']
            self.azmin=np.min(self.az)
            self.azmax=np.max(self.az)
            self.npoly=npoly
            self.ndet=tod.get_ndet()
        #constant term dropped, hence npoly-1 parameters per detector
        self.params=np.zeros([self.ndet,self.npoly-1])
    def _get_polys(self):
        #Chebyshev recursion on azimuth rescaled to [-1,1]; the constant row
        #is generated then stripped before returning
        polys=np.zeros([self.npoly,len(self.az)])
        polys[0,:]=1.0
        scaled=(self.az-self.azmin)/(self.azmax-self.azmin)*2.0-1.0
        if self.npoly>1:
            polys[1,:]=scaled
        for i in range(2,self.npoly):
            polys[i,:]=2*scaled*polys[i-1,:]-polys[i-2,:]
        return polys[1:,:].copy()
    def map2tod(self,tod,dat=None,do_add=True,do_omp=False):
        #evaluate the azimuth polynomials into the data
        if dat is None:
            dat=tod.get_data()
        pred=np.dot(self.params,self._get_polys())
        if do_add:
            dat[:]=dat[:]+pred
        else:
            dat[:]=pred
    def tod2map(self,tod,dat=None, do_add=True,do_omp=False):
        #project the data onto the azimuth polynomials
        if dat is None:
            dat=tod.get_data()
        proj=np.dot(dat,self._get_polys().T)
        if do_add:
            self.params[:]=self.params[:]+proj
        else:
            self.params[:]=proj
class tsAirmass:
    """Timestream model for atmospheric signal: a polynomial in the airmass.

    The model for every detector is sum_i params[i]*airmass**(i+1), with one
    shared airmass template per tod.
    """
    def __init__(self,tod=None,order=3):
        if tod is None:
            #empty placeholder, filled in by copy()
            self.sz=np.asarray([0,0],dtype='int')
            self.params=np.zeros(1)
            self.fname=''
            self.order=0
            self.airmass=None
        else:
            self.sz=tod.get_data_dims()
            self.fname=tod.info['fname']
            self.order=order
            self.params=np.zeros(order)
            if 'apix' in tod.info.keys():
                self.airmass=tod.info['apix']
            else:
                self.airmass=scaled_airmass_from_el(tod.info['elev'])
    def copy(self,copyMat=False):
        out=tsAirmass()
        out.sz=self.sz
        out.params=self.params.copy()
        out.fname=self.fname
        out.order=self.order
        if copyMat:
            out.airmass=self.airmass.copy()
        else:
            #the template never changes, so share a pointer to save RAM
            out.airmass=self.airmass
        return out
    def clear(self):
        self.params[:]=0.0
    def dot(self,ts):
        return np.sum(self.params*ts.params)
    def axpy(self,ts,a):
        self.params=self.params+a*ts.params
    def _get_current_legmat(self):
        #Legendre basis across the tod (kept for the commented-out alternate scheme)
        return np.polynomial.legendre.legvander(np.linspace(-1,1,self.sz[1]),self.order)
    def _get_current_model(self):
        poly=np.dot(self._get_current_legmat(),self.params)
        return np.repeat([poly],self.sz[0],axis=0)*self.airmass
    def tod2map(self,tod,dat,do_add=True,do_omp=False):
        #project the data onto powers of the airmass template
        proj=np.zeros(self.order)
        for i in range(self.order):
            proj[i]=np.sum(self.airmass**(i+1)*dat)
        if do_add:
            self.params[:]=self.params[:]+proj
        else:
            self.params[:]=proj
    def map2tod(self,tod,dat,do_add=True,do_omp=False):
        #evaluate the airmass polynomial into the data
        model=0.0
        for i in range(self.order):
            model=model+self.params[i]*self.airmass**(i+1)
        if do_add:
            dat[:]=dat[:]+model
        else:
            dat[:]=model
    def __mul__(self,to_mul):
        out=self.copy()
        out.params=self.params*to_mul.params
        return out
    def write(self,fname=None):
        pass
class tsCommon:
    """Timestream model for a common mode: one amplitude per time sample,
    identical across detectors.  tod2map sums the data over detectors;
    map2tod broadcasts the per-sample amplitudes back to every detector row."""
    def __init__(self,tod=None,*args,**kwargs):
        if tod is None:
            self.sz=np.asarray([0,0],dtype='int')
            self.params=np.zeros(1)
            self.fname=''
            return
        self.sz=tod.get_data_dims()
        self.params=np.zeros(self.sz[1])
        self.fname=tod.info['fname']
    def copy(self):
        cp=tsCommon()
        try:
            cp.sz=self.sz.copy()
        except:#if the size doesn't have a copy function, then it's probably a number you can just assign
            cp.sz=self.sz
        cp.fname=self.fname
        cp.params=self.params.copy()
        return cp
    def clear(self):
        self.params[:]=0.0
    def dot(self,common=None):
        other=self if common is None else common
        return np.dot(self.params,other.params)
    def axpy(self,common,a):
        #self <- self + a*common
        self.params=self.params+a*common.params
    def tod2map(self,tod,dat,do_add=True,do_omp=False):
        nm=tod.info['fname']
        if not do_add:
            self.clear()
        self.params[:]=self.params[:]+dat.sum(axis=0)
    def map2tod(self,tod,dat,do_add=True,do_omp=True):
        #NOTE: do_add is accepted but this always accumulates (historic behavior)
        nm=tod.info['fname']
        dat[:]=dat[:]+np.repeat([self.params],dat.shape[0],axis=0)
    def write(self,fname=None):
        pass
    def __mul__(self,to_mul):
        out=self.copy()
        out.params=self.params*to_mul.params
        return out
class detOffset:
    """Per-detector DC offset: one amplitude per detector, constant in time."""
    def __init__(self,tod=None):
        if tod is None:
            self.sz=1
            self.params=np.zeros(1)
            self.fname=''
            return
        self.sz=tod.get_ndet()
        self.params=np.zeros(self.sz)
        self.fname=tod.info['fname']
    def copy(self):
        out=detOffset()
        out.sz=self.sz
        out.params=self.params.copy()
        out.fname=self.fname
        return out
    def clear(self):
        self.params[:]=0
    def dot(self,other=None):
        target=self if other is None else other
        return np.dot(self.params,target.params)
    def axpy(self,common,a):
        #self <- self + a*common
        self.params=self.params+a*common.params
    def tod2map(self,tod,dat,do_add=True,do_omp=False):
        #sum each detector's samples into its offset amplitude
        if not do_add:
            self.clear()
        self.params[:]=self.params[:]+dat.sum(axis=1)
    def map2tod(self,tod,dat,do_add=True,do_omp=False):
        #broadcast each detector's offset across its row of samples
        if not do_add:
            dat[:]=0
        dat[:]=dat[:]+np.repeat([self.params],dat.shape[1],axis=0).transpose()
    def write(self,fname=None):
        pass
    def __mul__(self,to_mul):
        out=self.copy()
        out.params=self.params*to_mul.params
        return out
class tsCalib:
    """Per-detector calibration amplitude against a fixed prediction template.

    pred is either a per-sample vector (shared by all detectors) or a full
    (ndet,nsamp) template; params holds one amplitude per detector."""
    def __init__(self,tod=None,model=None):
        if tod is None:
            self.sz=1
            self.params=np.zeros(1)
            self.fname=''
            self.pred=None
            return
        self.sz=tod.get_ndet()
        self.params=np.zeros(self.sz)
        self.pred=model[tod.info['fname']].copy()
        self.fname=tod.info['fname']
    def copy(self):
        out=tsCalib()
        out.sz=self.sz
        out.params=self.params.copy()
        out.fname=self.fname
        out.pred=self.pred #template is treated as read-only, so share the pointer
        return out
    def clear(self):
        self.params[:]=0
    def dot(self,other=None):
        target=self if other is None else other
        return np.dot(self.params,target.params)
    def axpy(self,common,a):
        self.params=self.params+a*common.params
    def tod2map(self,tod,dat,do_add=True,do_omp=False):
        #project the data onto the prediction template, one amplitude per detector
        if not do_add:
            self.clear()
        if self.pred.ndim==1:
            self.params[:]=self.params[:]+np.dot(dat,self.pred)
        else:
            self.params[:]=self.params[:]+np.sum(dat*self.pred,axis=1)
    def map2tod(self,tod,dat,do_add=True,do_omp=False):
        #scale the template by each detector's amplitude and accumulate into dat
        if not do_add:
            dat[:]=0
        if self.pred.ndim==1:
            dat[:]=dat[:]+np.outer(self.params,self.pred)
        else:
            dat[:]=dat[:]+(self.pred.transpose()*self.params).transpose()
    def write(self,fname=None):
        pass
    def __mul__(self,to_mul):
        out=self.copy()
        out.params=self.params*to_mul.params
        return out
class tsModel:
    """Container mapping TOD filename -> a per-TOD timestream-model instance,
    dispatching tod2map/map2tod/dot/axpy/etc. to the model for each TOD.

    Fixed: mpi_reduce was defined twice; the duplicate definition (which
    silently overrode the first) has been removed.
    """
    def __init__(self,todvec=None,modelclass=None,*args,**kwargs):
        self.data={}
        if todvec is None:
            return
        for tod in todvec.tods:
            nm=tod.info['fname']
            self.data[nm]=modelclass(tod,*args,**kwargs)
    def copy(self):
        new_tsModel=tsModel()
        for nm in self.data.keys():
            new_tsModel.data[nm]=self.data[nm].copy()
        return new_tsModel
    def tod2map(self,tod,dat,do_add=True,do_omp=False):
        nm=tod.info['fname']
        if do_add==False:
            self.clear()
        self.data[nm].tod2map(tod,dat,do_add,do_omp)
    def map2tod(self,tod,dat,do_add=True,do_omp=True):
        nm=tod.info['fname']
        if do_add==False:
            dat[:]=0.0
        self.data[nm].map2tod(tod,dat,do_add,do_omp)
    def apply_prior(self,x,Ax):
        for nm in self.data.keys():
            self.data[nm].apply_prior(x.data[nm],Ax.data[nm])
    def dot(self,tsmodels=None):
        """Summed dot product over all per-TOD models; MPI-reduced when MPI is up."""
        tot=0.0
        for nm in self.data.keys():
            if tsmodels is None:
                tot=tot+self.data[nm].dot(self.data[nm])
            else:
                if nm in tsmodels.data:
                    tot=tot+self.data[nm].dot(tsmodels.data[nm])
                else:
                    print('error in tsModel.dot - missing key ',nm)
                    assert(1==0) #pretty sure we want to crash if missing names
        if have_mpi:
            tot=comm.allreduce(tot)
        return tot
    def clear(self):
        for nm in self.data.keys():
            self.data[nm].clear()
    def axpy(self,tsmodel,a):
        for nm in self.data.keys():
            self.data[nm].axpy(tsmodel.data[nm],a)
    def __mul__(self,tsmodel): #this is used in preconditioning - need to fix if ts-based preconditioning is desired
        tt=self.copy()
        for nm in self.data.keys():
            tt.data[nm]=self.data[nm]*tsmodel.data[nm]
        return tt
    def mpi_reduce(self):
        #per-TOD timestream parameters live on their owning rank; nothing to reduce
        pass
    def get_caches(self):
        for nm in self.data.keys():
            try:
                self.data[nm].get_caches()
            except:
                pass
    def clear_caches(self):
        for nm in self.data.keys():
            try:
                self.data[nm].clear_caches()
            except:
                pass
class tsMultiModel(tsModel):
    """A class to hold timestream models that are shared between groups of TODs."""
    def __init__(self,todvec=None,todtags=None,modelclass=None,tag='ts_multi_model',*args,**kwargs):
        self.data={}
        self.tag=tag
        if todtags is not None:
            #gather the group tags from all MPI ranks so every rank has every group
            alltags=np.unique(np.hstack(comm.allgather(todtags)))
            if modelclass is not None:
                for grouptag in alltags:
                    self.data[grouptag]=modelclass(*args,**kwargs)
            if todvec is not None:
                for i,tod in enumerate(todvec.tods):
                    tod.info[tag]=todtags[i]
    def copy(self):
        return copy.deepcopy(self)
    def tod2map(self,tod,dat,do_add=True,do_omp=False):
        #dispatch to the model owning this TOD's group
        self.data[tod.info[self.tag]].tod2map(tod,dat,do_add,do_omp)
    def map2tod(self,tod,dat,do_add=True,do_omp=False):
        self.data[tod.info[self.tag]].map2tod(tod,dat,do_add,do_omp)
    def dot(self,tsmodels=None):
        tot=0.0
        if tsmodels is None:
            for nm in self.data.keys():
                tot=tot+self.data[nm].dot(self.data[nm])
            return tot
        for nm in self.data.keys():
            if nm in tsmodels.data:
                tot=tot+self.data[nm].dot(tsmodels.data[nm])
            else:
                print('error in tsMultiModel.dot - missing key ',nm)
                assert(1==0)
        return tot
class Mapset:
    """An ordered collection of map objects treated as one block vector:
    dot, axpy, +, -, * operate component-wise across the contained maps."""
    def __init__(self):
        self.nmap=0
        self.maps=[]
    def add_map(self,map):
        self.maps.append(map.copy())
        self.nmap=self.nmap+1
    def clear(self):
        for i in range(self.nmap):
            self.maps[i].clear()
    def copy(self):
        new_mapset=Mapset()
        for i in range(self.nmap):
            new_mapset.add_map(self.maps[i].copy())
        return new_mapset
    def dot(self,mapset):
        tot=0.0
        for i in range(self.nmap):
            tot=tot+self.maps[i].dot(mapset.maps[i])
        return tot
    def axpy(self,mapset,a):
        #self <- self + a*mapset, component-wise
        for i in range(self.nmap):
            self.maps[i].axpy(mapset.maps[i],a)
    def __add__(self,mapset):
        mm=self.copy()
        mm.axpy(mapset,1.0)
        return mm
    def __sub__(self,mapset):
        mm=self.copy()
        mm.axpy(mapset,-1.0)
        return mm
    def __mul__(self,mapset):
        mm=mapset.copy()
        for i in range(self.nmap):
            mm.maps[i]=self.maps[i]*mapset.maps[i]
        return mm
    def get_caches(self):
        for i in range(self.nmap):
            self.maps[i].get_caches()
    def clear_caches(self):
        for i in range(self.nmap):
            self.maps[i].clear_caches()
    def apply_prior(self,x,Ax):
        """Apply each map's prior.  A map flagged with isglobal_prior sees the
        whole mapsets; otherwise it sees only its own component.

        Fixed: the old try/except fell back to the per-map call whenever ANY
        exception escaped - including errors raised inside a global prior's own
        apply_prior - masking real failures and running the per-map path a
        second time.  The flag is now tested explicitly with getattr."""
        for i in range(self.nmap):
            if not(self.maps[i] is None):
                if getattr(self.maps[i],'isglobal_prior',False):
                    self.maps[i].apply_prior(x,Ax)
                else:
                    self.maps[i].apply_prior(x.maps[i],Ax.maps[i])
    def mpi_reduce(self):
        if have_mpi:
            for map in self.maps:
                map.mpi_reduce()
class SkyMap:
    """A sky map on an (nx,ny) pixel grid with an associated WCS, plus the
    pixellization machinery to accumulate TOD samples into pixels (tod2map)
    and to project map values back into TODs (map2tod).

    Fixes relative to the previous version:
      * set_tod2map assigned self.tod2map_omp / self.todmap_atomic, neither of
        which exists (the methods are todmap_omp / tod2map_atomic), so those
        paths raised AttributeError.
      * tod2map_cached was referenced by set_tod2map but only existed as a
        commented-out stub; it is now implemented, matching the cached branch
        of tod2map.
      * plot tested the float pause_len as a dict key instead of 'pause_len'.
    """
    def __init__(self,lims,pixsize=0,proj='CAR',pad=2,primes=None,cosdec=None,nx=None,ny=None,mywcs=None,tag='ipix',purge_pixellization=False,ref_equ=False):
        if mywcs is None:
            assert(pixsize!=0) #we had better have a pixel size if we don't have an incoming WCS that contains it
            self.wcs=get_wcs(lims,pixsize,proj,cosdec,ref_equ)
        else:
            self.wcs=mywcs
            #take the pixel size from the WCS rather than the argument
            pixsize_use=mywcs.wcs.cdelt[1]*np.pi/180
            pixsize=pixsize_use
        corners=np.zeros([4,2])
        corners[0,:]=[lims[0],lims[2]]
        corners[1,:]=[lims[0],lims[3]]
        corners[2,:]=[lims[1],lims[2]]
        corners[3,:]=[lims[1],lims[3]]
        pix_corners=self.wcs.wcs_world2pix(corners*180/np.pi,1)
        pix_corners=np.round(pix_corners)
        if pix_corners.min()<-0.5:
            print('corners seem to have gone negative in SkyMap projection. not good, you may want to check this.')
        if nx is None:
            nx=(pix_corners[:,0].max()+pad)
        if ny is None:
            ny=(pix_corners[:,1].max()+pad)
        nx=int(nx)
        ny=int(ny)
        if not(primes is None):
            #round the map dimensions up to FFT-friendly sizes
            lens=find_good_fft_lens(2*(nx+ny),primes)
            nx=lens[lens>=nx].min()
            ny=lens[lens>=ny].min()
            self.primes=primes[:]
        else:
            self.primes=None
        self.nx=nx
        self.ny=ny
        self.lims=lims
        self.pixsize=pixsize
        self.map=np.zeros([nx,ny])
        self.proj=proj
        self.pad=pad
        self.tag=tag
        self.purge_pixellization=purge_pixellization
        self.caches=None
        self.cosdec=cosdec
        self.tod2map_method=None
    def get_caches(self):
        """Allocate one flattened per-thread accumulation buffer per OMP thread."""
        npix=self.nx*self.ny
        nthread=get_nthread()
        self.caches=np.zeros([nthread,npix])
    def clear_caches(self):
        """Collapse the per-thread partial maps into the main map and drop them."""
        self.map[:]=np.reshape(np.sum(self.caches,axis=0),self.map.shape)
        self.caches=None
    def set_tod2map(self,method=None,todvec=None):
        """Select which method of tod2map to use.  options include simple (1 proc), omp (everyone makes a map copy), everyone (everyone loops through
        all the data but assigns only to their own piece), atomic (no map copy, accumulate via atomic adds), and cached (every thread has a sticky
        copy of the map)."""
        if method is None:
            if nproc==1:
                self.tod2map_method=self.tod2map_simple
            else:
                self.tod2map_method=self.todmap_omp
            return
        if method=='omp':
            self.tod2map_method=self.todmap_omp
            return
        if method=='simple':
            self.tod2map_method=self.tod2map_simple
            return
        if method=='everyone':
            if todvec is None:
                print('need tods when setting to everyone so we can know which pieces belong to which threads')
            for tod in todvec.tods:
                ipix=self.get_pix(tod,False)
                ipix=ipix.copy()
                ipix=np.ravel(ipix)
                ipix.sort()
                inds=len(ipix)*np.arange(nproc+1)//nproc
                inds=np.asarray(inds,dtype='int32')
                tod.save_pixellization(self.tag+'_edges',inds)
            #NOTE(review): tod2map_everyone is not defined on this class; this
            #path will raise AttributeError until it is implemented - confirm.
            self.tod2map_method=self.tod2map_everyone
            return
        if method=='cached':
            self.get_caches()
            self.tod2map_method=self.tod2map_cached
            return
        if method=='atomic':
            self.tod2map_method=self.tod2map_atomic
    def tod2map_atomic(self,tod,dat):
        #accumulate without a per-thread map copy, using atomic adds
        ipix=self.get_pix(tod)
        tod2map_omp(self.map,dat,ipix,True)
    def todmap_omp(self,tod,dat):
        ipix=self.get_pix(tod)
        tod2map_omp(self.map,dat,ipix,False)
    def tod2map_simple(self,tod,dat):
        ipix=self.get_pix(tod)
        tod2map_simple(self.map,dat,ipix)
    def tod2map_cached(self,tod,dat):
        #accumulate into the per-thread caches (requires get_caches() first)
        ipix=self.get_pix(tod)
        tod2map_cached(self.caches,dat,ipix)
    def copy(self):
        return copy.deepcopy(self)
    def clear(self):
        self.map[:]=0
    def axpy(self,map,a):
        #self <- self + a*map
        self.map[:]=self.map[:]+a*map.map[:]
    def assign(self,arr):
        assert(arr.shape[0]==self.nx)
        assert(arr.shape[1]==self.ny)
        self.map[:]=arr
    def pix_from_radec(self,ra,dec):
        """Convert (ndet,nsamp) RA/Dec in radians into flattened int32 pixel indices."""
        ndet=ra.shape[0]
        nsamp=ra.shape[1]
        nn=ndet*nsamp
        coords=np.zeros([nn,2])
        coords[:,0]=np.reshape(ra*180/np.pi,nn)
        coords[:,1]=np.reshape(dec*180/np.pi,nn)
        pix=self.wcs.wcs_world2pix(coords,1)
        xpix=np.reshape(pix[:,0],[ndet,nsamp])-1 #-1 is to go between unit offset in FITS and zero offset in python
        ypix=np.reshape(pix[:,1],[ndet,nsamp])-1
        xpix=np.round(xpix)
        ypix=np.round(ypix)
        ipix=np.asarray(xpix*self.ny+ypix,dtype='int32')
        return ipix
    def get_pix(self,tod,savepix=True):
        """Return pixel indices for a TOD, using/saving the cached copy under self.tag."""
        if not(self.tag is None):
            ipix=tod.get_saved_pix(self.tag)
            if not(ipix is None):
                return ipix
        ra,dec=tod.get_radec()
        ipix=self.pix_from_radec(ra,dec)
        if savepix:
            if not(self.tag is None):
                tod.save_pixellization(self.tag,ipix)
        return ipix
    def map2tod(self,tod,dat,do_add=True,do_omp=True):
        """Project the map into a TOD: dat (+)= map[ipix]."""
        ipix=self.get_pix(tod)
        map2tod(dat,self.map,ipix,do_add,do_omp)
    def tod2map(self,tod,dat=None,do_add=True,do_omp=True):
        """Accumulate TOD samples into the map (or per-thread caches if active)."""
        if dat is None:
            dat=tod.get_data()
        if do_add==False:
            self.clear()
        ipix=self.get_pix(tod)
        if not(self.caches is None):
            tod2map_cached(self.caches,dat,ipix)
        else:
            if do_omp:
                tod2map_omp(self.map,dat,ipix)
            else:
                tod2map_simple(self.map,dat,ipix)
        if self.purge_pixellization:
            tod.clear_saved_pix(self.tag)
    def tod2map_old(self,tod,dat=None,do_add=True,do_omp=True):
        #kept for backwards compatibility; identical to tod2map
        if dat is None:
            dat=tod.get_data()
        if do_add==False:
            self.clear()
        ipix=self.get_pix(tod)
        if not(self.caches is None):
            tod2map_cached(self.caches,dat,ipix)
        else:
            if do_omp:
                tod2map_omp(self.map,dat,ipix)
            else:
                tod2map_simple(self.map,dat,ipix)
        if self.purge_pixellization:
            tod.clear_saved_pix(self.tag)
    def r_th_maps(self):
        """Return (radius,angle) maps about the map center, in pixel units."""
        xvec=np.arange(self.nx)
        xvec=xvec-xvec.mean()
        yvec=np.arange(self.ny)
        yvec=yvec-yvec.mean()
        ymat,xmat=np.meshgrid(yvec,xvec)
        rmat=np.sqrt(xmat**2+ymat**2)
        th=np.arctan2(xmat,ymat)
        return rmat,th
    def dot(self,map):
        """Elementwise inner product with another map."""
        tot=np.sum(self.map*map.map)
        return tot
    def plot(self,plot_info=None):
        """Show the map with matplotlib; optional plot_info dict overrides
        vmin, vmax, clf, pause, pause_len."""
        vmin=self.map.min()
        vmax=self.map.max()
        clf=True
        pause=True
        pause_len=0.001
        if not(plot_info is None):
            if 'vmin' in plot_info.keys():
                vmin=plot_info['vmin']
            if 'vmax' in plot_info.keys():
                vmax=plot_info['vmax']
            if 'clf' in plot_info.keys():
                clf=plot_info['clf']
            if 'pause' in plot_info.keys():
                pause=plot_info['pause']
            if 'pause_len' in plot_info.keys(): #fixed: used to test the float pause_len as a key
                pause_len=plot_info['pause_len']
        from matplotlib import pyplot as plt
        if clf:
            plt.clf()
        plt.imshow(self.map,vmin=vmin,vmax=vmax)
        if pause:
            plt.pause(pause_len)
    def write(self,fname='map.fits'):
        """Write the map to a FITS file (transposed so the WCS axes match FITS order)."""
        header=self.wcs.to_header()
        tmp=self.map.transpose().copy()
        hdu=fits.PrimaryHDU(tmp,header=header)
        try:
            hdu.writeto(fname,overwrite=True)
        except:
            hdu.writeto(fname,clobber=True) #older astropy/pyfits spelling
    def __mul__(self,map):
        new_map=map.copy()
        new_map.map[:]=self.map[:]*map.map[:]
        return new_map
    def mpi_reduce(self,chunksize=1e5):
        #chunksize is added since at least on my laptop mpi4py barfs if it
        #tries to reduce an nside=512 healpix map, so need to break it into pieces.
        if have_mpi:
            if chunksize>0:
                nchunk=(1.0*self.nx*self.ny)/chunksize
                nchunk=int(np.ceil(nchunk))
            else:
                nchunk=1
            if nchunk==1:
                self.map=comm.allreduce(self.map)
            else:
                inds=np.asarray(np.linspace(0,self.nx*self.ny,nchunk+1),dtype='int')
                if len(self.map.shape)>1:
                    tmp=np.zeros(self.map.size)
                    tmp[:]=np.reshape(self.map,len(tmp))
                else:
                    tmp=self.map
                for i in range(len(inds)-1):
                    tmp[inds[i]:inds[i+1]]=comm.allreduce(tmp[inds[i]:inds[i+1]])
                if len(self.map.shape)>1:
                    self.map[:]=np.reshape(tmp,self.map.shape)
    def invert(self):
        #in-place reciprocal of nonzero pixels (used for weight/preconditioner maps)
        mask=np.abs(self.map)>0
        self.map[mask]=1.0/self.map[mask]
class MapNoiseWhite:
    """Diagonal (white) map-space noise model: multiply a map by inverse variance."""
    def __init__(self,ivar_map,isinv=True,nfac=1.0):
        self.ivar=read_fits_map(ivar_map)
        if not isinv:
            #we were handed a variance map; invert the nonzero pixels
            good=self.ivar>0
            self.ivar[good]=1.0/self.ivar[good]
        self.ivar=self.ivar*nfac
    def apply_noise(self,map):
        return map*self.ivar
class SkyMapCoarse(SkyMap):
    """A SkyMap wrapper around a bare array that opts out of pixellization:
    map2tod/tod2map and the caches are no-ops, so this map participates only
    in the linear-algebra operations inherited from SkyMap."""
    def __init__(self,map):
        self.nx=map.shape[0]
        self.ny=map.shape[1] if len(map.shape)>1 else 1
        self.map=map.copy()
    def get_caches(self):
        return
    def clear_caches(self):
        return
    def copy(self):
        #shallow copy plus a private copy of the data
        cp=copy.copy(self)
        cp.map=self.map.copy()
        return cp
    def get_pix(self):
        return
    def map2tod(self,*args,**kwargs):
        return
    def tod2map(self,*args,**kwargs):
        return
class SkyMapTwoRes:
    """A pair of maps to serve as a prior for multi-experiment mapping.  This would e.g. be the ACT map that e.g. Mustang should agree
    with on large scales.  A coarse map and a finer map (osamp fine pixels per
    coarse pixel inside the masked region) are tied together through a
    beam-convolved, noise-weighted prior.

    Fixed: apply_Qinv's inner loop ran over nx_coarse instead of ny_coarse,
    which was wrong for non-square coarse regions (every sibling loop in this
    class uses nx_coarse x ny_coarse).
    """
    def __init__(self,map_lowres,lims,osamp=1,smooth_fac=0.0):
        small_wcs,lims_use,map_corner=get_aligned_map_subregion_car(lims,map_lowres,osamp=osamp)
        self.small_lims=lims_use
        self.small_wcs=small_wcs
        self.map=read_fits_map(map_lowres)
        self.osamp=osamp
        self.map_corner=map_corner
        self.beamft=None
        self.mask=None
        self.map_deconvolved=None
        self.noise=None
        self.fine_prior=None
        self.nx_coarse=None
        self.ny_coarse=None
        self.grid_facs=None
        self.isglobal_prior=True #Mapset.apply_prior hands this prior the full mapset
        self.smooth_fac=smooth_fac
    def copy(self):
        return copy.copy(self)
    def get_map_deconvolved(self,map_deconvolved):
        self.map_deconvolved=read_fits_map(map_deconvolved)
    def set_beam_gauss(self,fwhm_pix):
        """Set the beam to a normalized Gaussian with the given FWHM in pixels."""
        tmp=0*self.map
        xvec=get_ft_vec(tmp.shape[0])
        yvec=get_ft_vec(tmp.shape[1])
        xx,yy=np.meshgrid(yvec,xvec)
        rsqr=xx**2+yy**2
        sig_pix=fwhm_pix/np.sqrt(8*np.log(2))
        beam=np.exp(-0.5*rsqr/(sig_pix**2))
        beam=beam/np.sum(beam)
        self.beamft=np.fft.rfft2(beam)
    def set_beam_1d(self,prof,pixsize):
        """Set the beam from a 1-d radial profile prof=[radius,amplitude]."""
        tmp=0*self.map
        xvec=get_ft_vec(tmp.shape[0])
        yvec=get_ft_vec(tmp.shape[1])
        xx,yy=np.meshgrid(yvec,xvec)
        rsqr=xx**2+yy**2
        rr=np.sqrt(rsqr)*pixsize
        beam=interp(rr,prof[:,0],prof[:,1])
        beam=beam/np.sum(beam)
        self.beamft=np.fft.rfft2(beam)
    def set_noise_white(self,ivar_map,isinv=True,nfac=1.0):
        self.noise=MapNoiseWhite(ivar_map,isinv,nfac)
    def maps2fine(self,fine,coarse):
        """Fill the fine grid from the coarse map, keeping fine values inside the mask."""
        out=fine.copy()
        for i in range(self.nx_coarse):
            for j in range(self.ny_coarse):
                out[(i*self.osamp):((i+1)*self.osamp),(j*self.osamp):((j+1)*self.osamp)]=coarse[i+self.map_corner[0],j+self.map_corner[1]]
        out[self.mask]=fine[self.mask]
        return out
    def maps2coarse(self,fine,coarse):
        """Combine fine+coarse into one coarse map; grid_facs weights the masked fraction."""
        out=coarse.copy()
        for i in range(self.nx_coarse):
            for j in range(self.ny_coarse):
                out[i+self.map_corner[0],j+self.map_corner[1]]=(1-self.grid_facs[i,j])*coarse[i+self.map_corner[0],j+self.map_corner[1]]+np.sum(fine[(i*self.osamp):((i+1)*self.osamp),(j*self.osamp):((j+1)*self.osamp)])/self.osamp**2
        return out
    def coarse2maps(self,inmap):
        """Transpose of maps2coarse: split a coarse map into (coarse,fine) pieces."""
        coarse=1.0*inmap
        fine=np.zeros([self.nx_coarse*self.osamp,self.ny_coarse*self.osamp])
        for i in range(self.nx_coarse):
            for j in range(self.ny_coarse):
                coarse[i+self.map_corner[0],j+self.map_corner[1]]=(1-self.grid_facs[i,j])*inmap[i+self.map_corner[0],j+self.map_corner[1]]
                fine[(i*self.osamp):((i+1)*self.osamp),(j*self.osamp):((j+1)*self.osamp)]=inmap[i+self.map_corner[0],j+self.map_corner[1]]/self.osamp**2
        fine=fine*self.mask
        return coarse,fine
    def set_mask(self,hits,thresh=0):
        """Define the fine-map footprint from a hits map and precompute the
        per-coarse-cell masked fractions and the fine-scale prior template."""
        self.mask=hits>thresh
        self.fine_prior=0*hits
        self.nx_coarse=int(np.round(hits.shape[0]/self.osamp))
        self.ny_coarse=int(np.round(hits.shape[1]/self.osamp))
        self.grid_facs=np.zeros([self.nx_coarse,self.ny_coarse])
        for i in range(self.nx_coarse):
            for j in range(self.ny_coarse):
                self.grid_facs[i,j]=np.mean(self.mask[(i*self.osamp):((i+1)*self.osamp),(j*self.osamp):((j+1)*self.osamp)])
                self.fine_prior[(i*self.osamp):((i+1)*self.osamp),(j*self.osamp):((j+1)*self.osamp)]=self.map_deconvolved[self.map_corner[0]+i,self.map_corner[1]+j]
    def apply_Qinv(self,map):
        """Apply the (beam-convolved, noise-weighted) prior to a fine map."""
        tmp=self.fine_prior.copy()
        tmp[self.mask]=map[self.mask]
        tmp2=0*self.map_deconvolved.copy()
        for i in range(self.nx_coarse):
            for j in range(self.ny_coarse): #fixed: this inner loop used nx_coarse
                tmp2[self.map_corner[0]+i,self.map_corner[1]+j]=np.mean(tmp[(i*self.osamp):((i+1)*self.osamp),(j*self.osamp):((j+1)*self.osamp)])
        tmp2_conv=np.fft.irfft2(np.fft.rfft2(tmp2)*self.beamft)
        tmp2_conv_filt=self.noise.apply_noise(tmp2_conv)
        tmp2_reconv=np.fft.irfft2(np.fft.rfft2(tmp2_conv_filt)*self.beamft)
        fac=1.0/self.osamp**2
        for i in range(self.nx_coarse):
            for j in range(self.ny_coarse):
                tmp[(i*self.osamp):((i+1)*self.osamp),(j*self.osamp):((j+1)*self.osamp)]=fac*tmp2_reconv[i+self.map_corner[0],j+self.map_corner[1]]
        ans=0.0*tmp
        ans[self.mask]=tmp[self.mask]
        return ans
    def apply_H(self,coarse,fine):
        #NOTE(review): callers pass (fine_map,coarse_map) positionally, which is
        #what maps2coarse(fine,coarse) expects - so the parameter names here are
        #swapped.  Confirm before ever calling this with keyword arguments.
        mm=self.maps2coarse(coarse,fine)
        mm=self.beam_convolve(mm)
        return mm
    def apply_HT(self,mm):
        mm=self.beam_convolve(mm)
        coarse,fine=self.coarse2maps(mm)
        return coarse,fine
    def get_rhs(self,mapset):
        """Add the N^-1-weighted prior data into the matching coarse/fine maps of mapset."""
        coarse_ind=None
        fine_ind=None
        for i in range(mapset.nmap):
            if isinstance(mapset.maps[i],SkyMapCoarse):
                coarse_ind=i
            else:
                if isinstance(mapset.maps[i],SkyMap):
                    fine_ind=i
        if (coarse_ind is None)|(fine_ind is None):
            print("Errror in twolevel prior: either fine or coarse skymap not found.")
            return
        mm=self.noise.apply_noise(self.map)
        coarse,fine=self.apply_HT(mm)
        mapset.maps[coarse_ind].map[:]=mapset.maps[coarse_ind].map[:]+coarse
        mapset.maps[fine_ind].map[:]=mapset.maps[fine_ind].map[:]+fine
    def beam_convolve(self,map):
        mapft=np.fft.rfft2(map)
        mapft=mapft*self.beamft
        return np.fft.irfft2(mapft)
    def apply_prior(self,mapset,outmapset):
        """Accumulate H^T N^-1 H applied to mapset into outmapset, plus an
        optional smoothness term weighted by smooth_fac."""
        coarse_ind=None
        fine_ind=None
        for i in range(mapset.nmap):
            if isinstance(mapset.maps[i],SkyMapCoarse):
                coarse_ind=i
            else:
                if isinstance(mapset.maps[i],SkyMap):
                    fine_ind=i
        if (coarse_ind is None)|(fine_ind is None):
            print("Errror in twolevel prior: either fine or coarse skymap not found.")
            return
        mm=self.apply_H(mapset.maps[fine_ind].map,mapset.maps[coarse_ind].map)
        mm_filt=self.noise.apply_noise(mm)
        coarse,fine=self.apply_HT(mm_filt)
        outmapset.maps[fine_ind].map[self.mask]=outmapset.maps[fine_ind].map[self.mask]+fine[self.mask]
        outmapset.maps[coarse_ind].map[:]=outmapset.maps[coarse_ind].map[:]+coarse
        if self.smooth_fac>0:
            summed=self.maps2coarse(mapset.maps[fine_ind].map,mapset.maps[coarse_ind].map)
            summed_smooth=self.beam_convolve(summed)
            delt=summed-summed_smooth
            delt_filt=self.noise.apply_noise(delt)*self.smooth_fac
            delt_filt=delt_filt-self.beam_convolve(delt_filt)
            coarse,fine=self.coarse2maps(delt_filt)
            outmapset.maps[fine_ind].map[self.mask]=outmapset.maps[fine_ind].map[self.mask]+fine[self.mask]
            outmapset.maps[coarse_ind].map[:]=outmapset.maps[coarse_ind].map[:]+coarse
    def __bust_apply_prior(self,map,outmap):
        outmap.map[:]=outmap.map[:]+self.apply_Qinv(map.map)
def poltag2pols(poltag):
    """Map a polarization tag onto its list of map-plane names, or None if unknown."""
    lookup={'I':['I'],
            'IQU':['I','Q','U'],
            'QU':['Q','U'],
            'IQU_PRECON':['I','Q','U','QQ','QU','UU'],
            'QU_PRECON':['QQ','UU','QU']}
    return lookup.get(poltag)
class PolMap:
    """Polarized analog of SkyMap: a map with npol planes chosen by poltag
    (see poltag2pols).  For npol==1 the code paths mirror the plain SkyMap."""
    def __init__(self,lims,pixsize,poltag='I',proj='CAR',pad=2,primes=None,cosdec=None,nx=None,ny=None,mywcs=None,tag='ipix',purge_pixellization=False,ref_equ=False):
        # resolve the polarization tag into the individual map planes
        pols=poltag2pols(poltag)
        if pols is None:
            print('Unrecognized polarization state ' + poltag + ' in PolMap.__init__')
            return
        npol=len(pols)
        if mywcs is None:
            self.wcs=get_wcs(lims,pixsize,proj,cosdec,ref_equ)
        else:
            # NOTE(review): unlike SkyMap.__init__, pixsize is NOT re-derived
            # from the incoming WCS here - confirm whether that is intentional
            self.wcs=mywcs
        # map the four corners of the requested limits into pixel space to size the grid
        corners=np.zeros([4,2])
        corners[0,:]=[lims[0],lims[2]]
        corners[1,:]=[lims[0],lims[3]]
        corners[2,:]=[lims[1],lims[2]]
        corners[3,:]=[lims[1],lims[3]]
        pix_corners=self.wcs.wcs_world2pix(corners*180/np.pi,1)
        pix_corners=np.round(pix_corners)
        #print pix_corners
        #print type(pix_corners)
        #if pix_corners.min()<0.5:
        if pix_corners.min()<-0.5:
            print('corners seem to have gone negative in SkyMap projection. not good, you may want to check this.')
        if True: #try a patch to fix the wcs xxx
            if nx is None:
                nx=(pix_corners[:,0].max()+pad)
            if ny is None:
                ny=(pix_corners[:,1].max()+pad)
        else:
            nx=(pix_corners[:,0].max()+pad)
            ny=(pix_corners[:,1].max()+pad)
        #print nx,ny
        nx=int(nx)
        ny=int(ny)
        if not(primes is None):
            # round map dimensions up to FFT-friendly sizes
            lens=find_good_fft_lens(2*(nx+ny),primes)
            #print 'nx and ny initially are ',nx,ny
            nx=lens[lens>=nx].min()
            ny=lens[lens>=ny].min()
            #print 'small prime nx and ny are now ',nx,ny
            self.primes=primes[:]
        else:
            self.primes=None
        self.nx=nx
        self.ny=ny
        self.npol=npol
        self.poltag=poltag
        self.pols=pols
        self.lims=lims
        self.tag=tag
        self.purge_pixellization=purge_pixellization
        self.pixsize=pixsize
        # polarized maps carry one plane per polarization state
        if npol>1:
            self.map=np.zeros([nx,ny,npol])
        else:
            self.map=np.zeros([nx,ny])
        self.proj=proj
        self.pad=pad
        self.caches=None
        self.cosdec=cosdec
def get_caches(self):
npix=self.nx*self.ny*self.npol
nthread=get_nthread()
self.caches=np.zeros([nthread,npix])
def clear_caches(self):
self.map[:]=np.reshape(np.sum(self.caches,axis=0),self.map.shape)
self.caches=None
def copy(self):
if False:
newmap=PolMap(self.lims,self.pixsize,self.poltag,self.proj,self.pad,self.primes,cosdec=self.cosdec,nx=self.nx,ny=self.ny,mywcs=self.wcs)
newmap.map[:]=self.map[:]
return newmap
else:
return copy.deepcopy(self)
def clear(self):
self.map[:]=0
def axpy(self,map,a):
self.map[:]=self.map[:]+a*map.map[:]
    def assign(self,arr):
        """Copy arr into the map; shape must be (nx,ny) or (nx,ny,npol)."""
        assert(arr.shape[0]==self.nx)
        assert(arr.shape[1]==self.ny)
        if self.npol>1:
            assert(arr.shape[2]==self.npol)
        #self.map[:,:]=arr
        self.map[:]=arr
def set_polstate(self,poltag):
pols=poltag2pols(poltag)
if pols is None:
print('Unrecognized polarization state ' + poltag + ' in PolMap.set_polstate.')
return
npol=len(pols)
self.npol=npol
self.poltag=poltag
self.pols=pols
if npol>1:
self.map=np.zeros([self.nx,self.ny,npol])
else:
self.map=np.zeros([self.nx,self.ny])
    def invert(self,thresh=1e-6):
        """Invert the per-pixel preconditioner matrices in place.

        For *_PRECON poltags each pixel holds a small symmetric matrix packed
        into the map planes; these are pseudo-inverted with eigenvalue cut
        thresh.  For npol==1 the map is simply reciprocated where nonzero."""
        #We can use np.linalg.pinv to reasonably efficiently invert a bunch of tiny matrices with an
        #eigenvalue cut. It's more efficient to do this in C, but it only has to be done once per run
        if self.npol>1:
            if self.poltag=='QU_PRECON':
                # planes are [QQ, UU, QU]; unpack to 2x2 symmetric matrices
                tmp=np.zeros([self.nx*self.ny,2,2])
                tmp[:,0,0]=np.ravel(self.map[:,:,0])
                tmp[:,1,1]=np.ravel(self.map[:,:,1])
                tmp[:,0,1]=np.ravel(self.map[:,:,2])
                tmp[:,1,0]=np.ravel(self.map[:,:,2])
                tmp=np.linalg.pinv(tmp,thresh)
                self.map[:,:,0]=np.reshape(tmp[:,0,0],[self.map.shape[0],self.map.shape[1]])
                self.map[:,:,1]=np.reshape(tmp[:,1,1],[self.map.shape[0],self.map.shape[1]])
                self.map[:,:,2]=np.reshape(tmp[:,0,1],[self.map.shape[0],self.map.shape[1]])
            if self.poltag=='IQU_PRECON':
                #the mapping may seem a bit abstruse here. The preconditioner matrix has entries
                # [I   Q   U ]
                # [Q  QQ  QU ]
                # [U  QU  UU ]
                #so the unpacking needs to match the ordering in the C file before we can use pinv
                n=self.nx*self.ny
                nx=self.nx
                ny=self.ny
                tmp=np.zeros([self.nx*self.ny,3,3])
                tmp[:,0,0]=np.reshape(self.map[:,:,0],n)
                tmp[:,0,1]=np.reshape(self.map[:,:,1],n)
                tmp[:,1,0]=tmp[:,0,1]
                tmp[:,0,2]=np.reshape(self.map[:,:,2],n)
                tmp[:,2,0]=tmp[:,0,2]
                tmp[:,1,1]=np.reshape(self.map[:,:,3],n)
                tmp[:,1,2]=np.reshape(self.map[:,:,4],n)
                tmp[:,2,1]=tmp[:,1,2]
                tmp[:,2,2]=np.reshape(self.map[:,:,5],n)
                alldets=np.linalg.det(tmp)
                # pixels with near-singular matrices but positive I weight get
                # a diagonal-only (I-only) inverse below
                isbad=alldets<thresh*alldets.max()
                ispos=tmp[:,0,0]>0
                inds=isbad&ispos
                vec=tmp[inds,0,0]
                print('determinant range is ' + repr(alldets.max())+ '  ' + repr(alldets.min()))
                tmp=np.linalg.pinv(tmp,thresh)
                if True:
                    print('Warning!  zeroing out bits like this is super janky.  Be warned...')
                    tmp[isbad,:,:]=0
                    inds=isbad&ispos
                    tmp[inds,0,0]=1.0/vec
                    alldets=np.linalg.det(tmp)
                    print('determinant range is now ' + repr(alldets.max())+ '  ' + repr(alldets.min()))
                self.map[:,:,0]=np.reshape(tmp[:,0,0],[nx,ny])
                self.map[:,:,1]=np.reshape(tmp[:,0,1],[nx,ny])
                self.map[:,:,2]=np.reshape(tmp[:,0,2],[nx,ny])
                self.map[:,:,3]=np.reshape(tmp[:,1,1],[nx,ny])
                self.map[:,:,4]=np.reshape(tmp[:,1,2],[nx,ny])
                self.map[:,:,5]=np.reshape(tmp[:,2,2],[nx,ny])
        else:
            # unpolarized: simple per-pixel reciprocal of nonzero entries
            mask=self.map!=0
            self.map[mask]=1.0/self.map[mask]
def pix_from_radec(self,ra,dec):
ndet=ra.shape[0]
nsamp=ra.shape[1]
nn=ndet*nsamp
coords=np.zeros([nn,2])
coords[:,0]=np.reshape(ra*180/np.pi,nn)
coords[:,1]=np.reshape(dec*180/np.pi,nn)
#print coords.shape
pix=self.wcs.wcs_world2pix(coords,1)
#print pix.shape
xpix=np.reshape(pix[:,0],[ndet,nsamp])-1 #-1 is to go between unit offset in FITS and zero offset in python
ypix=np.reshape(pix[:,1],[ndet,nsamp])-1
xpix=np.round(xpix)
ypix=np.round(ypix)
ipix=np.asarray(xpix*self.ny+ypix,dtype='int32')
return ipix
    def get_pix(self,tod,savepix=True):
        """Return pixel indices for a TOD, reusing (and optionally saving)
        the cached pixellization stored on the TOD under self.tag."""
        if not(self.tag is None):
            ipix=tod.get_saved_pix(self.tag)
            if not(ipix is None):
                return ipix
        if False:
            # dead legacy path, kept for reference: computed pixels directly
            # from tod.info['dx']/['dy'] instead of via pix_from_radec
            ndet=tod.info['dx'].shape[0]
            nsamp=tod.info['dx'].shape[1]
            nn=ndet*nsamp
            coords=np.zeros([nn,2])
            coords[:,0]=np.reshape(tod.info['dx']*180/np.pi,nn)
            coords[:,1]=np.reshape(tod.info['dy']*180/np.pi,nn)
            #print coords.shape
            pix=self.wcs.wcs_world2pix(coords,1)
            #print pix.shape
            xpix=np.reshape(pix[:,0],[ndet,nsamp])-1 #-1 is to go between unit offset in FITS and zero offset in python
            ypix=np.reshape(pix[:,1],[ndet,nsamp])-1
            xpix=np.round(xpix)
            ypix=np.round(ypix)
            ipix=np.asarray(xpix*self.ny+ypix,dtype='int32')
        else:
            ra,dec=tod.get_radec()
            ipix=self.pix_from_radec(ra,dec)
        if savepix:
            if not(self.tag is None):
                tod.save_pixellization(self.tag,ipix)
        return ipix
    def map2tod(self,tod,dat,do_add=True,do_omp=True):
        """Project the map into a TOD: dat (+)= map[ipix], using the polarized
        projector (with the TOD's saved 2*gamma angles) when npol>1."""
        ipix=self.get_pix(tod)
        if self.npol>1:
            #polmap2tod(dat,self.map,self.poltag,tod.info['twogamma_saved'],tod.info['ipix'],do_add,do_omp)
            polmap2tod(dat,self.map,self.poltag,tod.info['twogamma_saved'],ipix,do_add,do_omp)
        else:
            #map2tod(dat,self.map,tod.info['ipix'],do_add,do_omp)
            map2tod(dat,self.map,ipix,do_add,do_omp)
    def tod2map(self,tod,dat,do_add=True,do_omp=True):
        """Accumulate TOD samples into the map.  Polarized maps use the
        polarized accumulator; unpolarized maps go through the caches when
        active, otherwise the OMP or simple accumulator."""
        if do_add==False:
            self.clear()
        ipix=self.get_pix(tod)
        #print('ipix start is ',ipix[0,0:500:100])
        if self.npol>1:
            #tod2polmap(self.map,dat,self.poltag,tod.info['twogamma_saved'],tod.info['ipix'])
            tod2polmap(self.map,dat,self.poltag,tod.info['twogamma_saved'],ipix)
            if self.purge_pixellization:
                tod.clear_saved_pix(self.tag)
            return
        #print("working on nonpolarized bit")
        if not(self.caches is None):
            #tod2map_cached(self.caches,dat,tod.info['ipix'])
            tod2map_cached(self.caches,dat,ipix)
        else:
            if do_omp:
                #tod2map_omp(self.map,dat,tod.info['ipix'])
                tod2map_omp(self.map,dat,ipix)
            else:
                #tod2map_simple(self.map,dat,tod.info['ipix'])
                tod2map_simple(self.map,dat,ipix)
        if self.purge_pixellization:
            tod.clear_saved_pix(self.tag)
def r_th_maps(self):
xvec=np.arange(self.nx)
xvec=xvec-xvec.mean()
yvec=np.arange(self.ny)
yvec=yvec-yvec.mean()
ymat,xmat=np.meshgrid(yvec,xvec)
rmat=np.sqrt(xmat**2+ymat**2)
th=np.arctan2(xmat,ymat)
return rmat,th
def dot(self,map):
tot=np.sum(self.map*map.map)
return tot
    def write(self,fname='map.fits'):
        """Write the map to FITS with its WCS header.

        Polarized maps are written one file per Stokes component, named
        <head>_<pol>.fits.  The overwrite/clobber fallback supports both old
        and new astropy versions.
        """
        header=self.wcs.to_header()
        if self.npol>1:
            #split fname into head+'.fits' so we can insert the pol label
            ind=fname.rfind('.')
            if ind>0:
                if fname[ind+1:]=='fits':
                    head=fname[:ind]
                    tail=fname[ind:]
                else:
                    head=fname
                    tail='.fits'
            else:
                head=fname
                tail='.fits'
            tmp=np.zeros([self.ny,self.nx])
            for i in range(self.npol):
                #transpose to match FITS (row-major) axis convention
                tmp[:]=np.squeeze(self.map[:,:,i]).T
                hdu=fits.PrimaryHDU(tmp,header=header)
                try:
                    hdu.writeto(head+'_'+self.pols[i]+tail,overwrite=True)
                except:
                    hdu.writeto(head+'_'+self.pols[i]+tail,clobber=True)
            return
        if True: #try a patch to fix the wcs xxx
            tmp=self.map.transpose().copy()
            hdu=fits.PrimaryHDU(tmp,header=header)
        else:
            hdu=fits.PrimaryHDU(self.map,header=header)
        try:
            hdu.writeto(fname,overwrite=True)
        except:
            hdu.writeto(fname,clobber=True)
    def __mul__(self,map):
        """Elementwise product; for preconditioner maps, apply the (inverted)
        per-pixel polarization matrix stored in self to the map on the right.

        For npol==1 this is a plain elementwise multiply.  Otherwise self must
        be the '<poltag>_PRECON' partner of map, and the packed matrix terms
        are combined to produce each output Stokes component.
        """
        if self.npol==1:
            new_map=self.copy()
            new_map.map[:]=self.map[:]*map.map[:]
            return new_map
        else:
            assert(map.poltag+'_PRECON'==self.poltag)
            new_map=map.copy()
            if self.poltag=='QU_PRECON':
                #packed 2x2 symmetric matrix: [QQ QU; QU UU] stored as planes 0,1,2
                new_map.map[:,:,0]=self.map[:,:,0]*map.map[:,:,0]+self.map[:,:,2]*map.map[:,:,1]
                new_map.map[:,:,1]=self.map[:,:,2]*map.map[:,:,0]+self.map[:,:,1]*map.map[:,:,1]
                return new_map
            if self.poltag=='IQU_PRECON':
                #the indices are set such that the preconditioner matrix [I Q U; Q QQ QU; U QU UU] match the C code.
                #once we've inverted, the output should be the product of that matrix times [I Q U]
                new_map.map[:,:,0]=self.map[:,:,0]*map.map[:,:,0]+self.map[:,:,1]*map.map[:,:,1]+self.map[:,:,2]*map.map[:,:,2]
                new_map.map[:,:,1]=self.map[:,:,1]*map.map[:,:,0]+self.map[:,:,3]*map.map[:,:,1]+self.map[:,:,4]*map.map[:,:,2]
                new_map.map[:,:,2]=self.map[:,:,2]*map.map[:,:,0]+self.map[:,:,4]*map.map[:,:,1]+self.map[:,:,5]*map.map[:,:,2]
                return new_map
            print('unrecognized tag in PolMap.__mul__: ' + repr(self.poltag))
            assert(1==0)
    def mpi_reduce(self,chunksize=1e5):
        #chunksize is added since at least on my laptop mpi4py barfs if it
        #tries to reduce an nside=512 healpix map, so need to break it into pieces.
        """Allreduce (sum) this map across MPI ranks, in chunks of chunksize
        elements to avoid mpi4py failures on very large buffers.  No-op when
        MPI is unavailable."""
        if have_mpi:
            #print("reducing map")
            if chunksize>0:
                nchunk=(1.0*self.nx*self.ny*self.npol)/chunksize
                nchunk=int(np.ceil(nchunk))
            else:
                nchunk=1
            #print('nchunk is ',nchunk)
            if nchunk==1:
                self.map=comm.allreduce(self.map)
            else:
                #reduce the flattened map piecewise, then restore its shape
                inds=np.asarray(np.linspace(0,self.nx*self.ny*self.npol,nchunk+1),dtype='int')
                if len(self.map.shape)>1:
                    tmp=np.zeros(self.map.size)
                    tmp[:]=np.reshape(self.map,len(tmp))
                else:
                    tmp=self.map
                for i in range(len(inds)-1):
                    tmp[inds[i]:inds[i+1]]=comm.allreduce(tmp[inds[i]:inds[i+1]])
                    #self.map[inds[i]:inds[i+1]]=comm.allreduce(self.map[inds[i]:inds[i+1]])
                    #tmp=np.zeros(inds[i+1]-inds[i])
                    #tmp[:]=self.map[inds[i]:inds[i+1]]
                    #tmp=comm.allreduce(tmp)
                    #self.map[inds[i]:inds[i+1]]=tmp
                if len(self.map.shape)>1:
                    self.map[:]=np.reshape(tmp,self.map.shape)
            #print("reduced")
class HealMap(SkyMap):
    """Healpix-pixelized map exposing the SkyMap interface.

    All pixels live along the first axis (nx = npix, ny = 1).
    """
    def __init__(self,proj='RING',nside=512,tag='ipix'):
        """Create a Healpix map with ordering proj ('RING'/'NEST') and resolution nside."""
        if not(have_healpy):
            #bug fix: was printf(), which does not exist in Python
            print("Healpix map requested, but healpy not found.")
            return
        self.proj=proj
        self.nside=nside
        self.nx=healpy.nside2npix(self.nside)
        self.ny=1
        self.caches=None
        self.tag=tag
        self.map=np.zeros([self.nx,self.ny])
    def copy(self):
        """Return a deep copy of this map."""
        newmap=HealMap(self.proj,self.nside,self.tag)
        newmap.map[:]=self.map[:]
        return newmap
    def pix_from_radec(self,ra,dec):
        """Convert RA/Dec (radians) to int32 Healpix pixel indices."""
        ipix=healpy.ang2pix(self.nside,np.pi/2-dec,ra,self.proj=='NEST')
        return np.asarray(ipix,dtype='int32')
    def write(self,fname='map.fits',overwrite=True):
        """Write the map to a FITS file via healpy."""
        if self.map.shape[1]<=1:
            healpy.write_map(fname,self.map[:,0],nest=(self.proj=='NEST'),overwrite=overwrite)
class HealPolMap(PolMap):
    """Healpix-pixelized polarized map exposing the PolMap interface.

    All pixels live along the first axis (nx = npix, ny = 1); polarized maps
    carry one plane per Stokes component along the third axis.
    """
    def __init__(self,poltag='I',proj='RING',nside=512,tag='ipix',purge_pixellization=False):
        """Create a Healpix polarized map for Stokes set poltag at resolution nside."""
        if not(have_healpy):
            #bug fix: was printf(), which does not exist in Python
            print("Healpix map requested, but healpy not found.")
            return
        pols=poltag2pols(poltag)
        if pols is None:
            print('Unrecognized polarization state ' + poltag + ' in PolMap.__init__')
            return
        npol=len(pols)
        self.proj=proj
        self.nside=nside
        self.nx=healpy.nside2npix(self.nside)
        self.ny=1
        self.npol=npol
        self.poltag=poltag
        self.pols=pols
        self.caches=None
        self.tag=tag
        self.purge_pixellization=purge_pixellization
        if self.npol>1:
            self.map=np.zeros([self.nx,self.ny,self.npol])
        else:
            self.map=np.zeros([self.nx,self.ny])
    def copy(self):
        """Return a deep copy of this map."""
        return copy.deepcopy(self)
    def pix_from_radec(self,ra,dec):
        """Convert RA/Dec (radians) to int32 Healpix pixel indices."""
        ipix=healpy.ang2pix(self.nside,np.pi/2-dec,ra,self.proj=='NEST')
        return np.asarray(ipix,dtype='int32')
    def write(self,fname='map.fits',overwrite=True):
        """Write to FITS; polarized maps go to one file per Stokes component."""
        if self.map.shape[1]<=1:
            if self.npol==1:
                healpy.write_map(fname,self.map[:,0],nest=(self.proj=='NEST'),overwrite=overwrite)
            else:
                #split fname into head+'.fits' so we can insert the pol label
                ind=fname.rfind('.')
                if (ind>0) and (fname[ind+1:]=='fits'):
                    head=fname[:ind]
                    tail=fname[ind:]
                else:
                    head=fname
                    tail='.fits'
                tmp=np.zeros(self.nx)
                for i in range(self.npol):
                    tmp[:]=np.squeeze(self.map[:,:,i]).T
                    healpy.write_map(head+'_'+self.pols[i]+tail,tmp,nest=(self.proj=='NEST'),overwrite=overwrite)
class Cuts:
    """Per-sample cuts: solve for values at flagged TOD samples.

    Flagged samples are stored as flat indices into the raveled data array
    (self.inds); self.map holds one solved value per flagged sample.

    Fixes relative to the previous version: the copy constructor stored the
    copied state under the wrong attribute names (bad_inds/namps instead of
    inds/nsamp), so copies were unusable; and map2tod had its add/overwrite
    branches swapped relative to the do_add convention used elsewhere.
    """
    def __init__(self,tod,do_add=True):
        if isinstance(tod,Cuts): #copy constructor
            self.map=tod.map.copy()
            self.inds=tod.inds.copy()
            self.nsamp=tod.nsamp
            self.do_add=tod.do_add
            return
        bad_inds=np.where(tod.info['bad_samples'])
        dims=tod.get_data_dims()
        #convert (det,samp) index pairs into flat indices into the raveled data
        bad_inds=np.ravel_multi_index(bad_inds,dims)
        self.nsamp=len(bad_inds)
        self.inds=bad_inds
        self.map=np.zeros(self.nsamp)
        self.do_add=do_add
    def clear(self):
        """Zero the cut values."""
        self.map[:]=0
    def axpy(self,cuts,a):
        """self.map += a * cuts.map"""
        self.map[:]=self.map[:]+a*cuts.map[:]
    def map2tod(self,tod,dat):
        """Scatter the cut values into dat (add when do_add, else overwrite)."""
        dd=np.ravel(dat) #view of dat for C-contiguous input, so writes stick
        if self.do_add:
            dd[self.inds]+=self.map
        else:
            dd[self.inds]=self.map
    def tod2map(self,tod,dat):
        """Gather the flagged samples of dat into self.map."""
        dd=np.ravel(dat)
        self.map[:]=dd[self.inds]
    def dot(self,cuts):
        """Scalar product with another Cuts object."""
        return np.dot(self.map,cuts.map)
    def copy(self):
        return Cuts(self)
class CutsCompact:
    """Segment-based cuts: contiguous cut regions stored per detector.

    Cuts are kept as (istart, istop) segment lists per detector; get_imap()
    flattens them into a single index array (self.imap) into the raveled
    data, with one solved value per cut sample in self.map.  The heavy
    scatter/gather runs in C via tod2cuts_c / cuts2tod_c.
    """
    def __init__(self,tod):
        if isinstance(tod,CutsCompact): #copy constructor
            self.ndet=tod.ndet
            self.nseg=tod.nseg
            self.istart=tod.istart
            self.istop=tod.istop
        else:
            #ndet=tod.info['dat_calib'].shape[0]
            ndet=tod.get_ndet()
            self.ndet=ndet
            self.nseg=np.zeros(ndet,dtype='int')
            self.istart=[None]*ndet
            self.istop=[None]*ndet
        #self.imax=tod.info['dat_calib'].shape[1]
        self.imax=tod.get_ndata()
        self.imap=None
        self.map=None
    def copy(self,deep=True):
        """Return a copy; deep=True also copies imap/map arrays."""
        copy=CutsCompact(self)
        if deep:
            if not(self.imap is None):
                copy.imap=self.imap.copy()
            if not(self.map is None):
                copy.map=self.map.copy()
        else:
            copy.imap=self.imap
            copy.map=self.map
        return copy
    def add_cut(self,det,istart,istop):
        """Register a cut segment [istart,istop) for detector det, clipped to the data length."""
        if istart>=self.imax:
            #this is asking to add a cut past the end of the data.
            return
        if istop>self.imax: #don't have a cut run past the end of the timestream
            istop=self.imax
        self.nseg[det]=self.nseg[det]+1
        if self.istart[det] is None:
            self.istart[det]=[istart]
        else:
            self.istart[det].append(istart)
        if self.istop[det] is None:
            self.istop[det]=[istop]
        else:
            self.istop[det].append(istop)
    def get_imap(self):
        """Flatten the per-detector segments into self.imap (flat data indices) and allocate self.map."""
        ncut=0
        for det in range(self.ndet):
            for i in range(self.nseg[det]):
                ncut=ncut+(self.istop[det][i]-self.istart[det][i])
        print('ncut is ' + repr(ncut))
        self.imap=np.zeros(ncut,dtype='int64')
        icur=0
        for det in range(self.ndet):
            for i in range(self.nseg[det]):
                istart=det*self.imax+self.istart[det][i]
                istop=det*self.imax+self.istop[det][i]
                nn=istop-istart
                self.imap[icur:icur+nn]=np.arange(istart,istop)
                icur=icur+nn
        self.map=np.zeros(len(self.imap))
    def cuts_from_array(self,cutmat):
        """Set the segments from a boolean (ndet,ndata) cut matrix."""
        for det in range(cutmat.shape[0]):
            nseg,istart,istop=segs_from_vec(cutmat[det,:])
            self.nseg[det]=nseg
            self.istart[det]=istart
            self.istop[det]=istop
    def merge_cuts(self):
        """Merge overlapping/adjacent segments on each detector."""
        tmp=np.ones(self.imax+2,dtype='bool')
        for det in range(self.ndet):
            if self.nseg[det]>1: #if we only have one segment, don't have to worry about strange overlaps
                tmp[:]=True
                for i in range(self.nseg[det]):
                    tmp[(self.istart[det][i]+1):(self.istop[det][i]+1)]=False
                nseg,istart,istop=segs_from_vec(tmp,pad=False)
                self.nseg[det]=nseg
                self.istart[det]=istart
                self.istop[det]=istop
    def tod2map(self,tod,mat=None,do_add=True,do_omp=False):
        """Gather cut samples from mat (default: the TOD data) into self.map via C."""
        if mat is None:
            #mat=tod.info['dat_calib']
            mat=tod.get_data()
        tod2cuts_c(self.map.ctypes.data,mat.ctypes.data,self.imap.ctypes.data,len(self.imap),do_add)
    def map2tod(self,tod,mat=None,do_add=True,do_omp=False):
        """Scatter self.map back into mat (default: the TOD data) via C."""
        if mat is None:
            #mat=tod.info['dat_calib']
            mat=tod.get_data()
        #print('first element is ' + repr(mat[0,self.imap[0]]))
        cuts2tod_c(mat.ctypes.data,self.map.ctypes.data,self.imap.ctypes.data,len(self.imap),do_add)
        #print('first element is now ' + repr(mat[0,self.imap[0]]))
        #return mat
    def clear(self):
        """Zero the cut values (no-op if get_imap has not run)."""
        if not(self.map is None):
            self.map[:]=0
    def dot(self,other=None):
        """Scalar product with another CutsCompact (or with self); None if unallocated."""
        if self.map is None:
            return None
        if other is None:
            return np.dot(self.map,self.map)
        else:
            if other.map is None:
                return None
            return np.dot(self.map,other.map)
    def axpy(self,common,a):
        """self.map += a * common.map"""
        self.map=self.map+a*common.map
    def write(self,fname=None):
        """Cuts have no map product to write; kept for interface compatibility."""
        pass
    def apply_prior(self,x,Ax):
        """Accumulate the (diagonal) prior term: Ax += self * x."""
        Ax.map=Ax.map+self.map*x.map
    def __mul__(self,to_mul):
        tt=self.copy()
        tt.map=self.map*to_mul.map
        return tt
class SkyMapCar(SkyMap):
    """Plate-carree (CAR) variant of SkyMap; only the pixelization differs."""
    def pix_from_radec(self,ra,dec):
        """Convert RA/Dec into int32 flat pixel indices on the CAR grid."""
        ix = np.round((ra - self.lims[0]) * self.cosdec / self.pixsize)
        # the +0.5 ahead of the int cast rounds dec to the nearest pixel row
        iy = (dec - self.lims[2]) / self.pixsize + 0.5
        return np.asarray(ix * self.ny + iy, dtype='int32')
class SkyMapCarOld:
    """Deprecated plate-carree map with the legacy (WCS-free) pixelization.

    Kept for backward compatibility; prefer SkyMapCar.
    """
    def __init__(self,lims,pixsize):
        """lims = [ra_min, ra_max, dec_min, dec_max] (radians); pixsize in radians."""
        try:
            self.lims=lims.copy()
        except:
            self.lims=lims[:]
        self.pixsize=pixsize
        #cos(dec) at the map center makes RA pixels square on the sky
        self.cosdec=np.cos(0.5*(lims[2]+lims[3]))
        nx=int(np.ceil((lims[1]-lims[0])/pixsize*self.cosdec))
        ny=int(np.ceil((lims[3]-lims[2])/pixsize))
        self.nx=nx
        self.ny=ny
        self.npix=nx*ny
        self.map=np.zeros([nx,ny])
    def copy(self):
        #NOTE(review): returns a SkyMapCar, not a SkyMapCarOld -- looks
        #intentional as a migration path, but confirm.
        mycopy=SkyMapCar(self.lims,self.pixsize)
        mycopy.map[:]=self.map[:]
        return mycopy
    def clear(self):
        """Zero the map."""
        self.map[:,:]=0
    def axpy(self,map,a):
        """self.map += a * map.map"""
        self.map[:]=self.map[:]+a*map.map[:]
    def assign(self,arr):
        """Overwrite the map with arr (shape must match)."""
        assert(arr.shape[0]==self.nx)
        assert(arr.shape[1]==self.ny)
        self.map[:,:]=arr
    def get_pix(self,tod):
        """Convert the TOD pointing into int32 flat pixel indices."""
        xpix=np.round((tod.info['dx']-self.lims[0])*self.cosdec/self.pixsize)
        ypix=np.round((tod.info['dy']-self.lims[2])/self.pixsize)
        #ipix=np.asarray(ypix*self.nx+xpix,dtype='int32')
        ipix=np.asarray(xpix*self.ny+ypix,dtype='int32')
        return ipix
    def map2tod(self,tod,dat,do_add=True,do_omp=True):
        """Project the map into TOD space using the precomputed tod.info['ipix']."""
        map2tod(dat,self.map,tod.info['ipix'],do_add,do_omp)
    def tod2map(self,tod,dat,do_add=True,do_omp=True):
        """Accumulate TOD samples into the map (transpose of map2tod)."""
        if do_add==False:
            self.clear()
        if do_omp:
            tod2map_omp(self.map,dat,tod.info['ipix'])
        else:
            tod2map_simple(self.map,dat,tod.info['ipix'])
    def r_th_maps(self):
        """Return (r, theta) maps of pixel radius and angle about the map center."""
        xvec=np.arange(self.nx)
        xvec=xvec-xvec.mean()
        yvec=np.arange(self.ny)
        yvec=yvec-yvec.mean()
        ymat,xmat=np.meshgrid(yvec,xvec)
        rmat=np.sqrt(xmat**2+ymat**2)
        th=np.arctan2(xmat,ymat)
        return rmat,th
    def dot(self,map):
        """Scalar product with another map."""
        tot=np.sum(self.map*map.map)
        return tot
def find_bad_skew_kurt(dat,skew_thresh=6.0,kurt_thresh=5.0):
    """Flag detectors whose timestream skewness/kurtosis are outliers.

    A detector is marked bad when |skew| or |kurt| exceeds the threshold
    times the median absolute value over all detectors.  Returns
    (skew, kurt, isgood).

    NOTE(review): skew is normalized by std**1.5 rather than the standard
    std**3; preserved as-is since only relative values matter here --
    confirm intent.
    """
    sig = np.std(dat, axis=1)
    skew = np.mean(dat**3, axis=1) / sig**1.5
    kurt = np.mean(dat**4, axis=1) / sig**4 - 3
    good = np.ones(dat.shape[0], dtype='bool')
    good[np.abs(skew) > skew_thresh * np.median(np.abs(skew))] = False
    good[np.abs(kurt) > kurt_thresh * np.median(np.abs(kurt))] = False
    return skew, kurt, good
def timestreams_from_gauss(ra,dec,fwhm,tod,pred=None):
    """Fill (or allocate) pred with a unit-amplitude Gaussian source model.

    ra/dec in the TOD pointing units; fwhm in arcseconds, converted to a
    sigma in radians for the C filler fill_gauss_src_c.
    """
    if pred is None:
        #pred=np.zeros(tod.info['dat_calib'].shape)
        pred=tod.get_empty(True)
    #n=tod.info['dat_calib'].size
    n=np.product(tod.get_data_dims())
    assert(pred.size==n)
    npar_src=4 #x,y,sig,amp
    dx=tod.info['dx']
    dy=tod.info['dy']
    pp=np.zeros(npar_src)
    pp[0]=ra
    pp[1]=dec
    #fwhm (arcsec) -> sigma (radians)
    pp[2]=fwhm/np.sqrt(8*np.log(2))*np.pi/180/3600
    pp[3]=1
    fill_gauss_src_c(pp.ctypes.data,dx.ctypes.data,dy.ctypes.data,pred.ctypes.data,n)
    return pred
def timestreams_from_isobeta_c(params,tod,pred=None):
    """Fill (or allocate) pred with an isobeta cluster model plus Gaussian sources.

    params holds [x0,y0,theta,beta,amp] for the isobeta term followed by
    4 values (x,y,sig,amp) per source; the heavy lifting runs in C.
    """
    if pred is None:
        #pred=np.zeros(tod.info['dat_calib'].shape)
        pred=tod.get_empty(True)
    #n=tod.info['dat_calib'].size
    n=np.product(tod.get_data_dims())
    assert(pred.size==n)
    dx=tod.info['dx']
    dy=tod.info['dy']
    fill_isobeta_c(params.ctypes.data,dx.ctypes.data,dy.ctypes.data,pred.ctypes.data,n)
    npar_beta=5 #x,y,theta,beta,amp
    npar_src=4 #x,y,sig,amp
    nsrc=(params.size-npar_beta)//npar_src
    #the C source filler adds each Gaussian on top of pred
    for i in range(nsrc):
        pp=np.zeros(npar_src)
        ioff=i*npar_src+npar_beta
        pp[:]=params[ioff:(ioff+npar_src)]
        fill_gauss_src_c(pp.ctypes.data,dx.ctypes.data,dy.ctypes.data,pred.ctypes.data,n)
    return pred
def derivs_from_elliptical_isobeta(params,tod,*args,**kwargs):
    """Return (derivs, pred) for a 7-parameter elliptical isobeta model,
    with derivs shaped [npar, ndet, ndata]; filled by the numba kernel."""
    npar=len(params)
    assert(npar==7)
    pred=tod.get_empty()
    dims=np.hstack([npar,pred.shape])
    derivs=np.empty(dims)
    dx=tod.info['dx']
    dy=tod.info['dy']
    minkasi_nb.fill_elliptical_isobeta_derivs(params,dx,dy,pred,derivs)
    return derivs,pred
def derivs_from_elliptical_gauss(params,tod,*args,**kwargs):
    """Return (derivs, pred) for a 6-parameter elliptical Gaussian model,
    with derivs shaped [npar, ndet, ndata]; filled by the numba kernel."""
    npar=len(params)
    assert(npar==6)
    pred=tod.get_empty()
    dims=np.hstack([npar,pred.shape])
    derivs=np.empty(dims)
    dx=tod.info['dx']
    dy=tod.info['dy']
    minkasi_nb.fill_elliptical_gauss_derivs(params,dx,dy,pred,derivs)
    return derivs,pred
def derivs_from_isobeta_c(params,tod,*args,**kwargs):
    """Return (derivs, pred) for the 5-parameter isobeta model via the C filler."""
    npar=5;
    #n=tod.info['dat_calib'].size
    dims=tod.get_data_dims()
    n=np.product(dims)
    #sz_deriv=np.append(npar,tod.info['dat_calib'].shape)
    sz_deriv=np.append(npar,dims)
    #pred=np.zeros(tod.info['dat_calib'].shape)
    pred=tod.get_empty(True)
    derivs=np.zeros(sz_deriv)
    dx=tod.info['dx']
    dy=tod.info['dy']
    fill_isobeta_derivs_c(params.ctypes.data,dx.ctypes.data,dy.ctypes.data,pred.ctypes.data,derivs.ctypes.data,n)
    return derivs,pred
def derivs_from_gauss_c(params,tod,*args,**kwargs):
    """Return (derivs, pred) for the 4-parameter Gaussian source model via the C filler."""
    npar=4
    #n=tod.info['dat_calib'].size
    n=tod.get_nsamp()
    #sz_deriv=np.append(npar,tod.info['dat_calib'].shape)
    sz_deriv=np.append(npar,tod.get_data_dims())
    #pred=np.zeros(tod.info['dat_calib'].shape)
    pred=tod.get_empty(True)
    derivs=np.zeros(sz_deriv)
    dx=tod.info['dx']
    dy=tod.info['dy']
    fill_gauss_derivs_c(params.ctypes.data,dx.ctypes.data,dy.ctypes.data,pred.ctypes.data,derivs.ctypes.data,n)
    return derivs,pred
def derivs_from_map(pars,tod,fun,map,dpar,do_symm=False,*args,**kwargs):
    """Numerically differentiate a map-level model w.r.t. its parameters.

    fun(map,pars,...) fills the map; the map is then projected into TOD
    space.  Derivatives use a forward difference of step dpar[i] per
    parameter, or a centered difference when do_symm is set.  Returns
    (derivs, pred) with derivs shaped [npar, ndet, ndata].
    """
    #print('do_symm is ',do_symm)
    pred=tod.get_empty()
    fun(map,pars,*args,**kwargs)
    map.map2tod(tod,pred,False)
    npar=len(pars)
    tmp=tod.get_empty()
    if do_symm:
        tmp2=tod.get_empty()
    derivs=np.empty([npar,pred.shape[0],pred.shape[1]])
    for i in range(npar):
        pp=pars.copy()
        pp[i]=pp[i]+dpar[i]
        fun(map,pp,*args,**kwargs)
        tmp[:]=0 #strictly speaking, we shouldn't need this, but it makes us more robust to bugs elsewhere
        map.map2tod(tod,tmp,False)
        if do_symm:
            pp=pars.copy()
            pp[i]=pp[i]-dpar[i]
            fun(map,pp,*args,**kwargs)
            tmp2[:]=0
            map.map2tod(tod,tmp2,False)
            derivs[i,:,:]=(tmp-tmp2)/(2*dpar[i])
        else:
            derivs[i,:,:]=(tmp-pred)/(dpar[i])
    #pred=np.reshape(pred,pred.size)
    #derivs=np.reshape(derivs,[derivs.shape[0],derivs.shape[1]*derivs.shape[2]])
    return derivs,pred
def timestreams_from_isobeta(params,tod):
    """Evaluate an isobeta cluster model plus Gaussian sources for one TOD.

    params holds [x0, y0, theta, beta, amp] followed by 4 values
    (x, y, sig, amp) per Gaussian source.  A cos(dec) factor converts RA
    offsets to proper distances.  Returns the predicted timestreams.
    """
    npar_beta=5 #x,y,theta,beta,amp
    npar_src=4 #x,y,sig,amp
    nsrc=(params.size-npar_beta)//npar_src
    assert(params.size==nsrc*npar_src+npar_beta)
    x0,y0,theta,beta,amp=params[:npar_beta]
    dra=(tod.info['dx']-x0)*np.cos(y0)
    ddec=tod.info['dy']-y0
    r2=(dra*dra+ddec*ddec)/theta**2
    pred=amp*(1+r2)**(0.5-1.5*beta)
    for i in range(nsrc):
        base=npar_beta+i*npar_src
        sx,sy,ssig,samp=params[base:base+npar_src]
        dra=tod.info['dx']-sx
        ddec=tod.info['dy']-sy
        r2=(dra*np.cos(sy))**2+ddec**2
        pred=pred+samp*np.exp(-0.5*r2/ssig**2)
    return pred
def isobeta_src_chisq(params,tods):
    """Return the total chi^2 of the isobeta+sources model over a set of TODs.

    Model timestreams come from timestreams_from_isobeta_c; the residual
    chi^2 uses each TOD's own noise model.

    Everything after the original first return was an unreachable stale
    pure-python copy of the model evaluation and has been removed.
    """
    chisq=0.0
    for tod in tods.tods:
        pred=timestreams_from_isobeta_c(params,tod)
        chisq=chisq+tod.timestream_chisq(tod.get_data()-pred)
    return chisq
class NoiseBinnedDet:
    """Per-detector noise model with flat inverse power in frequency bins.

    freqs gives the bin edges in Hz (required; converted to r2r-transform
    index space using the sample spacing dt).  apply_noise multiplies the
    r2r transform of the data by the per-detector, per-bin inverse power.
    """
    def __init__(self,dat,dt,freqs=None,scale_facs=None):
        ndet=dat.shape[0]
        ndata=dat.shape[1]
        #nn is the effective length of the underlying even-extended transform
        nn=2*(ndata-1)
        dnu=1/(nn*dt)
        bins=np.asarray(freqs/dnu,dtype='int')
        bins=bins[bins<ndata]
        bins=np.hstack([bins,ndata])
        if bins[0]>0:
            bins=np.hstack([0,bins])
        if bins[0]<0:
            bins[0]=0
        self.bins=bins
        nbin=len(bins)-1
        self.nbin=nbin
        det_ps=np.zeros([ndet,nbin])
        datft=mkfftw.fft_r2r(dat)
        for i in range(nbin):
            #store inverse mean power per bin, per detector
            det_ps[:,i]=1.0/np.mean(datft[:,bins[i]:bins[i+1]]**2,axis=1)
        self.det_ps=det_ps
        self.ndata=ndata
        self.ndet=ndet
        self.nn=nn
    def apply_noise(self,dat):
        """Return N^-1 dat via binned filtering in r2r transform space."""
        datft=mkfftw.fft_r2r(dat)
        for i in range(self.nbin):
            #datft[:,self.bins[i]:self.bins[i+1]]=datft[:,self.bins[i]:self.bins[i+1]]*np.outer(self.det_ps[:,i],self.bins[i+1]-self.bins[i])
            datft[:,self.bins[i]:self.bins[i+1]]=datft[:,self.bins[i]:self.bins[i+1]]*np.outer(self.det_ps[:,i],np.ones(self.bins[i+1]-self.bins[i]))
        dd=mkfftw.fft_r2r(datft)
        #halve the endpoints to account for the r2r transform edge convention
        dd[:,0]=0.5*dd[:,0]
        dd[:,-1]=0.5*dd[:,-1]
        return dd
class NoiseWhite:
    """Uncorrelated white-noise model with per-detector inverse-variance weights.

    Sigma per detector is estimated robustly from the median absolute
    sample-to-sample difference.
    """
    def __init__(self,dat):
        #this is the ratio between the median absolute
        #deviation of the diff and sigma
        fac=scipy.special.erfinv(0.5)*2
        self.sigs=np.median(np.abs(np.diff(dat,axis=1)),axis=1)/fac
        self.weights=1/self.sigs**2
    def apply_noise(self,dat):
        """Scale each detector's row by its weight (in place) and return dat."""
        assert(dat.shape[0]==len(self.weights))
        for idet in range(dat.shape[0]):
            dat[idet,:]=dat[idet,:]*self.weights[idet]
        return dat
class NoiseWhiteNotch:
    """White-noise model with a notch filter zeroing [numin, numax] in frequency.

    Per-detector weights come from a robust sigma estimate; the r2r
    normalization is folded into the weights so apply_noise needs no extra
    scaling after the round-trip transform.
    """
    def __init__(self,dat,numin,numax,tod):
        #ratio between the median absolute deviation of the diff and sigma
        fac=scipy.special.erfinv(0.5)*2
        sigs=np.median(np.abs(np.diff(dat,axis=1)),axis=1)/fac
        self.sigs=sigs
        self.weights=1/sigs**2
        self.weights=self.weights/(2*(dat.shape[1]-1)) #fold in fft normalization to the weights
        tvec=tod.get_tvec()
        dt=np.median(np.diff(tvec))
        tlen=tvec[-1]-tvec[0]
        #frequency resolution of the even-extended r2r transform
        dnu=1.0/(2*tlen-dt)
        self.istart=int(np.floor(numin/dnu))
        self.istop=int(np.ceil(numax/dnu))+1
    def apply_noise(self,dat):
        """Return N^-1 dat: notch out the band in transform space, then weight."""
        assert(dat.shape[0]==len(self.weights))
        datft=mkfftw.fft_r2r(dat)
        datft[:,self.istart:self.istop]=0
        dat=mkfftw.fft_r2r(datft)
        ndet=dat.shape[0]
        for i in range(ndet):
            dat[i,:]=dat[i,:]*self.weights[i]
        return dat
class NoiseBinnedEig:
    """Binned noise model with bright eigenmodes treated separately.

    Eigenvectors of the data covariance above thresh^2 * median(eig) are
    projected out as correlated modes; both the mode and residual spectra
    are binned (bin edges from freqs, in Hz).  apply_noise then applies the
    Woodbury-style inverse per bin.
    """
    def __init__(self,dat,dt,freqs=None,scale_facs=None,thresh=5.0):
        ndet=dat.shape[0]
        ndata=dat.shape[1]
        nn=2*(ndata-1)
        mycov=np.dot(dat,dat.T)
        mycov=0.5*(mycov+mycov.T) #symmetrize against roundoff
        ee,vv=np.linalg.eig(mycov)
        mask=ee>thresh*thresh*np.median(ee)
        vecs=vv[:,mask]
        ts=np.dot(vecs.T,dat)
        resid=dat-np.dot(vv[:,mask],ts)
        dnu=1/(nn*dt)
        print('dnu is ' + repr(dnu))
        bins=np.asarray(freqs/dnu,dtype='int')
        bins=bins[bins<ndata]
        bins=np.hstack([bins,ndata])
        if bins[0]>0:
            bins=np.hstack([0,bins])
        if bins[0]<0:
            bins[0]=0
        self.bins=bins
        nbin=len(bins)-1
        self.nbin=nbin
        nmode=ts.shape[0]
        det_ps=np.zeros([ndet,nbin])
        mode_ps=np.zeros([nmode,nbin])
        residft=mkfftw.fft_r2r(resid)
        modeft=mkfftw.fft_r2r(ts)
        for i in range(nbin):
            #inverse mean power per bin for residuals and modes
            det_ps[:,i]=1.0/np.mean(residft[:,bins[i]:bins[i+1]]**2,axis=1)
            mode_ps[:,i]=1.0/np.mean(modeft[:,bins[i]:bins[i+1]]**2,axis=1)
        self.modes=vecs.copy()
        if not(np.all(np.isfinite(det_ps))):
            print("warning - have non-finite numbers in noise model.  This should not be unexpected.")
            det_ps[~np.isfinite(det_ps)]=0.0
        self.det_ps=det_ps
        self.mode_ps=mode_ps
        self.ndata=ndata
        self.ndet=ndet
        self.nn=nn
    def apply_noise(self,dat):
        """Return N^-1 dat using the per-bin (diag + modes) Woodbury inverse."""
        assert(dat.shape[0]==self.ndet)
        assert(dat.shape[1]==self.ndata)
        datft=mkfftw.fft_r2r(dat)
        for i in range(self.nbin):
            n=self.bins[i+1]-self.bins[i]
            #print('bins are ',self.bins[i],self.bins[i+1],n,datft.shape[1])
            tmp=self.modes*np.outer(self.det_ps[:,i],np.ones(self.modes.shape[1]))
            mat=np.dot(self.modes.T,tmp)
            mat=mat+np.diag(self.mode_ps[:,i])
            mat_inv=np.linalg.inv(mat)
            Ax=datft[:,self.bins[i]:self.bins[i+1]]*np.outer(self.det_ps[:,i],np.ones(n))
            tmp=np.dot(self.modes.T,Ax)
            tmp=np.dot(mat_inv,tmp)
            tmp=np.dot(self.modes,tmp)
            tmp=Ax-tmp*np.outer(self.det_ps[:,i],np.ones(n))
            datft[:,self.bins[i]:self.bins[i+1]]=tmp
            #print(tmp.shape,mat.shape)
        dd=mkfftw.fft_r2r(datft)
        #halve the endpoints to account for the r2r transform edge convention
        dd[:,0]=0.5*dd[:,0]
        dd[:,-1]=0.5*dd[:,-1]
        return dd
class NoiseCMWhite:
    """Common-mode + white noise model.

    The brightest SVD mode of the data is taken as the common mode; the
    per-detector white-noise weights come from the variance of the data
    after removing that mode.  apply_noise projects out the (weighted)
    common mode and applies the weights.
    """
    def __init__(self,dat):
        print('setting up noise cm white')
        u,s,v=np.linalg.svd(dat,0)
        self.ndet=len(s)
        ind=np.argmax(s)
        self.v=np.zeros(self.ndet)
        self.v[:]=u[:,ind]
        pred=np.outer(self.v*s[ind],v[ind,:])
        dat_clean=dat-pred
        myvar=np.std(dat_clean,1)**2
        self.mywt=1.0/myvar
    def apply_noise(self,dat,dd=None):
        """Return N^-1 dat; dd can supply a preallocated output buffer."""
        t1=time.time()
        #solve for the common-mode amplitude per sample with weighted LS
        mat=np.dot(self.v,np.diag(self.mywt))
        lhs=np.dot(self.v,mat.T)
        rhs=np.dot(mat,dat)
        if isinstance(lhs,np.ndarray):
            cm=np.dot(np.linalg.inv(lhs),rhs)
        else:
            cm=rhs/lhs
        t2=time.time()
        if dd is None:
            dd=np.empty(dat.shape)
        if have_numba:
            #np.outer with an out= buffer, then in-place ops via numba kernels
            np.outer(-self.v,cm,dd)
            t3=time.time()
            #dd[:]=dd[:]+dat
            minkasi_nb.axpy_in_place(dd,dat)
            minkasi_nb.scale_matrix_by_vector(dd,self.mywt)
        else:
            dd=dat-np.outer(self.v,cm)
            #print(dd[:4,:4])
            t3=time.time()
            tmp=np.repeat([self.mywt],len(cm),axis=0).T
            dd=dd*tmp
        t4=time.time()
        #print(t2-t1,t3-t2,t4-t3)
        return dd
    def get_det_weights(self):
        """Return a copy of the per-detector white-noise weights."""
        return self.mywt.copy()
class NoiseSmoothedSVD:
    """SVD-rotated noise model with smoothed (or power-law fit) spectra.

    The data are rotated into the SVD eigenbasis; each rotated timestream
    gets an inverse power spectrum, either smoothed with a Gaussian of
    width fwhm or fit with a power law.  Optionally prewhitens by a robust
    per-detector sigma first.  u_in allows supplying an external rotation.
    """
    def __init__(self,dat_use,fwhm=50,prewhiten=False,fit_powlaw=False,u_in=None):
        if prewhiten:
            noisevec=np.median(np.abs(np.diff(dat_use,axis=1)),axis=1)
            dat_use=dat_use/(np.repeat([noisevec],dat_use.shape[1],axis=0).transpose())
        if u_in is None:
            u,s,v=np.linalg.svd(dat_use,0)
            ndet=s.size
        else:
            u=u_in
            assert(u.shape[0]==u.shape[1])
            ndet=u.shape[0]
        #print(u.shape,s.shape,v.shape)
        print('got svd')
        n=dat_use.shape[1]
        self.v=np.zeros([ndet,ndet])
        self.v[:]=u.transpose()
        if u_in is None:
            #SVD rotation is orthogonal, so the inverse is the transpose
            self.vT=self.v.T
        else:
            self.vT=np.linalg.inv(self.v)
        dat_rot=np.dot(self.v,dat_use)
        if fit_powlaw:
            spec_smooth=0*dat_rot
            for ind in range(ndet):
                fitp,datsqr,C=fit_ts_ps(dat_rot[ind,:]);
                spec_smooth[ind,1:]=C
        else:
            dat_trans=mkfftw.fft_r2r(dat_rot)
            spec_smooth=smooth_many_vecs(dat_trans**2,fwhm)
        #store inverse power; zero the DC weight
        spec_smooth[:,1:]=1.0/spec_smooth[:,1:]
        spec_smooth[:,0]=0
        if prewhiten:
            self.noisevec=noisevec.copy()
        else:
            self.noisevec=None
        self.mywt=spec_smooth
    def apply_noise(self,dat):
        """Return N^-1 dat: rotate, filter in transform space, rotate back."""
        if not(self.noisevec is None):
            noisemat=np.repeat([self.noisevec],dat.shape[1],axis=0).transpose()
            dat=dat/noisemat
        dat_rot=np.dot(self.v,dat)
        datft=mkfftw.fft_r2r(dat_rot)
        nn=datft.shape[1]
        datft=datft*self.mywt[:,:nn]
        dat_rot=mkfftw.fft_r2r(datft)
        #dat=np.dot(self.v.T,dat_rot)
        dat=np.dot(self.vT,dat_rot)
        #halve the endpoints for the r2r transform edge convention
        dat[:,0]=0.5*dat[:,0]
        dat[:,-1]=0.5*dat[:,-1]
        if not(self.noisevec is None):
            #noisemat=np.repeat([self.noisevec],dat.shape[1],axis=0).transpose()
            dat=dat/noisemat
        return dat
    def apply_noise_wscratch(self,dat,tmp,tmp2):
        """Same as apply_noise but reuses caller-provided scratch buffers."""
        if not(self.noisevec is None):
            noisemat=np.repeat([self.noisevec],dat.shape[1],axis=0).transpose()
            dat=dat/noisemat
        dat_rot=tmp
        dat_rot=np.dot(self.v,dat,dat_rot)
        dat=tmp2
        datft=dat
        datft=mkfftw.fft_r2r(dat_rot,datft)
        nn=datft.shape[1]
        datft[:]=datft*self.mywt[:,:nn]
        dat_rot=tmp
        dat_rot=mkfftw.fft_r2r(datft,dat_rot)
        #dat=np.dot(self.v.T,dat_rot)
        dat=np.dot(self.vT,dat_rot,dat)
        dat[:,0]=0.5*dat[:,0]
        dat[:,-1]=0.5*dat[:,-1]
        if not(self.noisevec is None):
            #noisemat=np.repeat([self.noisevec],dat.shape[1],axis=0).transpose()
            dat=dat/noisemat
        return dat
    def get_det_weights(self):
        """Find the per-detector weights for use in making actual noise maps."""
        mode_wt=np.sum(self.mywt,axis=1)
        #tmp=np.dot(self.v.T,np.dot(np.diag(mode_wt),self.v))
        tmp=np.dot(self.vT,np.dot(np.diag(mode_wt),self.v))
        return np.diag(tmp).copy()*2.0
class Tod:
    def __init__(self,info):
        """Wrap a TOD info dictionary (pointing, data, metadata).

        The dict itself is shallow-copied; jumps/cuts/noise start unset.
        """
        self.info=info.copy()
        self.jumps=None
        self.cuts=None
        self.noise=None
        self.noise_delayed=False
def lims(self):
xmin=self.info['dx'].min()
xmax=self.info['dx'].max()
ymin=self.info['dy'].min()
ymax=self.info['dy'].max()
return xmin,xmax,ymin,ymax
    def set_apix(self):
        '''calculates dxel normalized to +-1 from elevation'''
        #TBD pass in and calculate scan center's elevation vs time
        #fit a quadratic to the array-averaged elevation vs (normalized) time
        elev=np.mean(self.info['elev'],axis=0)
        x=np.arange(elev.shape[0])/elev.shape[0]
        a=np.polyfit(x,elev,2)
        ndet=self.info['elev'].shape[0]
        #polyfit returns highest degree first: a[0]x^2 + a[1]x + a[2]
        track_elev,xel=np.meshgrid(a[2]+a[1]*x+a[0]*x**2,np.ones(ndet))
        delev=self.info['elev'] - track_elev
        ml=np.max(np.abs(delev))
        self.info['apix']=delev/ml
    def get_ndet(self):
        """Number of detectors in this TOD."""
        return self.info['dat_calib'].shape[0]
    def get_ndata(self):
        """Number of samples per detector."""
        return self.info['dat_calib'].shape[1]
    def get_nsamp(self):
        #get total number of timestream samples, not samples per detector
        #return np.product(self.info['dat_calib'].shape)
        return self.get_ndet()*self.get_ndata()
    def get_saved_pix(self,tag=None):
        """Return the pixellization cached under tag, or None if absent."""
        if tag is None:
            return None
        if tag in self.info.keys():
            return self.info[tag]
        else:
            return None
    def clear_saved_pix(self,tag=None):
        """Drop the cached pixellization stored under tag, if any."""
        if tag is None:
            return
        if tag in self.info.keys():
            del(self.info[tag])
    def save_pixellization(self,tag,ipix):
        """Cache pixel indices ipix under tag (warns when overwriting)."""
        if tag in self.info.keys():
            print('warning - overwriting key ',tag,' in tod.save_pixellization.')
        self.info[tag]=ipix
    def get_data_dims(self):
        """Return (ndet, ndata)."""
        return (self.get_ndet(),self.get_ndata())
        #dims=self.info['dat_calib'].shape
        #if len(dims)==1:
        #    dims=np.asarray([1,dims[0]],dtype='int')
        #return dims
        #return self.info['dat_calib'].shape
    def get_data(self):
        """Return the calibrated data array (no copy)."""
        return self.info['dat_calib']
    def get_tvec(self):
        """Return the sample time vector."""
        return self.info['ctime']
    def get_radec(self):
        """Return the (ra, dec) pointing arrays."""
        return self.info['dx'],self.info['dy']
    def get_empty(self,clear=False):
        """Return a data-shaped array; zeroed if clear, else uninitialized.

        The dtype follows info['dtype'] if present, else the data's dtype,
        else float.
        """
        if 'dtype' in self.info.keys():
            dtype=self.info['dtype']
        elif 'dat_calib' in self.info.keys():
            dtype=self.info['dat_calib'].dtype
        else:
            dtype='float'
        if clear:
            #return np.zeros(self.info['dat_calib'].shape,dtype=self.info['dat_calib'].dtype)
            return np.zeros([self.get_ndet(),self.get_ndata()],dtype=dtype)
        else:
            #return np.empty(self.info['dat_calib'].shape,dtype=self.info['dat_calib'].dtype)
            return np.empty([self.get_ndet(),self.get_ndata()],dtype=dtype)
    def set_tag(self,tag):
        """Attach an identifying tag to this TOD."""
        self.info['tag']=tag
    def set_pix(self,map):
        """Compute and cache this TOD's pixellization for map, keyed by map.tag."""
        ipix=map.get_pix(self)
        #self.info['ipix']=ipix
        self.info[map.tag]=ipix
def copy(self,copy_info=False):
if copy_info:
myinfo=self.info.copy()
for key in myinfo.keys():
try:
myinfo[key]=self.info[key].copy()
except:
pass
tod=Tod(myinfo)
else:
tod=Tod(self.info)
if not(self.jumps is None):
try:
tod.jumps=self.jumps.copy()
except:
tod.jumps=self.jumps[:]
if not(self.cuts is None):
try:
tod.cuts=self.cuts.copy()
except:
tod.cuts=self.cuts[:]
tod.cuts=self.cuts[:]
tod.noise=self.noise
return tod
    def set_noise(self,modelclass=NoiseSmoothedSVD,dat=None,delayed=False,*args,**kwargs):
        """Attach a noise model of type modelclass, built from dat (default:
        the calibrated data).  With delayed=True, construction is deferred
        until the first apply_noise call (args/kwargs are deep-copied)."""
        if delayed:
            self.noise_args=copy.deepcopy(args)
            self.noise_kwargs=copy.deepcopy(kwargs)
            self.noise_delayed=True
            self.noise_modelclass=modelclass
        else:
            self.noise_delayed=False
            if dat is None:
                dat=self.info['dat_calib']
            self.noise=modelclass(dat,*args,**kwargs)
    def get_det_weights(self):
        """Return the noise model's per-detector weights, or None if the model
        is unset or does not provide them."""
        if self.noise is None:
            print("noise model not set in get_det_weights.")
            return None
        try:
            return self.noise.get_det_weights()
        except:
            print("noise model does not support detector weights in get_det_weights.")
            return None
    def set_noise_white_masked(self):
        """Select the legacy masked-white noise model with unit weights."""
        self.info['noise']='white_masked'
        self.info['mywt']=np.ones(self.info['dat_calib'].shape[0])
    def apply_noise_white_masked(self,dat=None):
        """Return mask * dat weighted per detector (legacy masked-white model)."""
        if dat is None:
            dat=self.info['dat_calib']
        dd=self.info['mask']*dat*np.repeat([self.info['mywt']],self.info['dat_calib'].shape[1],axis=0).transpose()
        return dd
def set_noise_cm_white(self):
print('deprecated usage - please switch to tod.set_noise(minkasi.NoiseCMWhite)')
self.set_noise(NoiseCMWhite)
return
u,s,v=np.linalg.svd(self.info['dat_calib'],0)
ndet=len(s)
ind=np.argmax(s)
mode=np.zeros(ndet)
#mode[:]=u[:,0] #bug fixes pointed out by Fernando Zago. 19 Nov 2019
#pred=np.outer(mode,v[0,:])
mode[:]=u[:,ind]
pred=np.outer(mode*s[ind],v[ind,:])
dat_clean=self.info['dat_calib']-pred
myvar=np.std(dat_clean,1)**2
self.info['v']=mode
self.info['mywt']=1.0/myvar
self.info['noise']='cm_white'
    def apply_noise_cm_white(self,dat=None):
        """Legacy fallback: apply the common-mode + white model stored in info.

        Only reached when the class-based noise path fails; see apply_noise.
        """
        print("I'm not sure how you got here (tod.apply_noise_cm_white), but you should not have been able to.  Please complain to someone.")
        if dat is None:
            dat=self.info['dat_calib']
        #weighted least-squares solve for the common-mode amplitude
        mat=np.dot(self.info['v'],np.diag(self.info['mywt']))
        lhs=np.dot(self.info['v'],mat.transpose())
        rhs=np.dot(mat,dat)
        #if len(lhs)>1:
        if isinstance(lhs,np.ndarray):
            cm=np.dot(np.linalg.inv(lhs),rhs)
        else:
            cm=rhs/lhs
        dd=dat-np.outer(self.info['v'],cm)
        tmp=np.repeat([self.info['mywt']],len(cm),axis=0).transpose()
        dd=dd*tmp
        return dd
    def set_noise_binned_eig(self,dat=None,freqs=None,scale_facs=None,thresh=5.0):
        """Project out bright covariance eigenmodes and return the residual.

        NOTE(review): despite the name, this does not install a noise model
        on the TOD (freqs/scale_facs are unused); it only returns the data
        with above-threshold eigenmodes removed.  Confirm whether
        NoiseBinnedEig was meant to be attached here.
        """
        if dat is None:
            dat=self.info['dat_calib']
        mycov=np.dot(dat,dat.T)
        mycov=0.5*(mycov+mycov.T)
        ee,vv=np.linalg.eig(mycov)
        mask=ee>thresh*thresh*np.median(ee)
        vecs=vv[:,mask]
        ts=np.dot(vecs.T,dat)
        resid=dat-np.dot(vv[:,mask],ts)
        return resid
def set_noise_smoothed_svd(self,fwhm=50,func=None,pars=None,prewhiten=False,fit_powlaw=False):
'''If func comes in as not empty, assume we can call func(pars,tod) to get a predicted model for the tod that
we subtract off before estimating the noise.'''
print('deprecated usage - please switch to tod.set_noise(minkasi.NoiseSmoothedSVD)')
if func is None:
self.set_noise(NoiseSmoothedSVD,self.info['dat_calib'])
else:
dat_use=func(pars,self)
dat_use=self.info['dat_calib']-dat_use
self.set_noise(NoiseSmoothedSVD,dat_use)
return
if func is None:
dat_use=self.info['dat_calib']
else:
dat_use=func(pars,self)
dat_use=self.info['dat_calib']-dat_use
#u,s,v=numpy.linalg.svd(self.info['dat_calib']-tmp,0)
if prewhiten:
noisevec=np.median(np.abs(np.diff(dat_use,axis=1)),axis=1)
dat_use=dat_use/(np.repeat([noisevec],dat_use.shape[1],axis=0).transpose())
u,s,v=np.linalg.svd(dat_use,0)
print('got svd')
ndet=s.size
n=self.info['dat_calib'].shape[1]
self.info['v']=np.zeros([ndet,ndet])
self.info['v'][:]=u.transpose()
dat_rot=np.dot(self.info['v'],self.info['dat_calib'])
if fit_powlaw:
spec_smooth=0*dat_rot
for ind in range(ndet):
fitp,datsqr,C=fit_ts_ps(dat_rot[ind,:]);
spec_smooth[ind,1:]=C
else:
dat_trans=mkfftw.fft_r2r(dat_rot)
spec_smooth=smooth_many_vecs(dat_trans**2,fwhm)
spec_smooth[:,1:]=1.0/spec_smooth[:,1:]
spec_smooth[:,0]=0
if prewhiten:
self.info['noisevec']=noisevec.copy()
self.info['mywt']=spec_smooth
self.info['noise']='smoothed_svd'
#return dat_rot
    def apply_noise(self,dat=None):
        """Return N^-1 dat using the attached noise model.

        Builds a delayed noise model on first use.  If the class-based model
        fails, falls back to the legacy info-dict noise paths.
        """
        if dat is None:
            #dat=self.info['dat_calib']
            dat=self.get_data().copy() #the .copy() is here so you don't
            #overwrite data stored in the TOD.
        if self.noise_delayed:
            self.noise=self.noise_modelclass(dat,*(self.noise_args), **(self.noise_kwargs))
            self.noise_delayed=False
        try:
            return self.noise.apply_noise(dat)
        except:
            print("unable to use class-based noised, falling back onto hardwired.")
        if self.info['noise']=='cm_white':
            #print 'calling cm_white'
            return self.apply_noise_cm_white(dat)
        if self.info['noise']=='white_masked':
            return self.apply_noise_white_masked(dat)
        #if self.info.has_key('noisevec'):
        if 'noisevec' in self.info:
            noisemat=np.repeat([self.info['noisevec']],dat.shape[1],axis=0).transpose()
            dat=dat/noisemat
        dat_rot=np.dot(self.info['v'],dat)
        datft=mkfftw.fft_r2r(dat_rot)
        nn=datft.shape[1]
        datft=datft*self.info['mywt'][:,0:nn]
        dat_rot=mkfftw.fft_r2r(datft)
        dat=np.dot(self.info['v'].transpose(),dat_rot)
        #if self.info.has_key('noisevec'):
        if 'noisevec' in self.info:
            dat=dat/noisemat
        #halve the endpoints for the r2r transform edge convention
        dat[:,0]=0.5*dat[:,0]
        dat[:,-1]=0.5*dat[:,-1]
        return dat
def mapset2tod(self,mapset,dat=None):
if dat is None:
#dat=0*self.info['dat_calib']
dat=self.get_empty(True)
for map in mapset.maps:
map.map2tod(self,dat)
return dat
def tod2mapset(self,mapset,dat=None):
if dat is None:
#dat=self.info['dat_calib']
dat=self.get_data()
for map in mapset.maps:
map.tod2map(self,dat)
def dot(self,mapset,mapset_out,times=False):
#tmp=0.0*self.info['dat_calib']
#for map in mapset.maps:
# map.map2tod(self,tmp)
t1=time.time()
tmp=self.mapset2tod(mapset)
t2=time.time()
tmp=self.apply_noise(tmp)
t3=time.time()
self.tod2mapset(mapset_out,tmp)
t4=time.time()
#for map in mapset_out.maps:
# map.tod2map(self,tmp)
if times:
return(np.asarray([t2-t1,t3-t2,t4-t3]))
def set_jumps(self,jumps):
self.jumps=jumps
    def cut_detectors(self,isgood):
        #cut all detectors not in boolean array isgood
        """Remove, in place, every detector for which isgood is False/0.

        Every ndarray in self.info is sliced down with slice_with_copy, and
        the per-detector jumps/cuts lists (when present) have the bad entries
        deleted.  bad_inds is reversed so deletions run from the end of the
        lists first and earlier deletions don't shift later indices.
        """
        isbad=np.asarray(1-isgood,dtype='bool')
        bad_inds=np.where(isbad)
        bad_inds=np.fliplr(bad_inds)  #descending order for safe deletion
        bad_inds=bad_inds[0]
        print(bad_inds)
        nkeep=np.sum(isgood)  #NOTE(review): computed but never used
        for key in self.info.keys():
            if isinstance(self.info[key],np.ndarray):
                self.info[key]=slice_with_copy(self.info[key],isgood)
        if not(self.jumps is None):
            for i in bad_inds:
                print('i in bad_inds is ',i)
                del(self.jumps[i])
        if not(self.cuts is None):
            for i in bad_inds:
                del(self.cuts[i])
def timestream_chisq(self,dat=None):
if dat is None:
dat=self.info['dat_calib']
dat_filt=self.apply_noise(dat)
chisq=np.sum(dat_filt*dat)
return chisq
def prior_from_skymap(self,skymap):
"""stuff.
prior_from_skymap(self,skymap):
Given e.g. the gradient of a map that has been zeroed under some threshold,
return a CutsCompact object that can be used as a prior for solving for per-sample deviations
due to strong map gradients. This is to reduce X's around bright sources. The input map
should be a SkyMap that is non-zero where one wishes to solve for the per-sample deviations, and
the non-zero values should be the standard deviations expected in those pixel. The returned CutsCompact
object will have the weight (i.e. 1/input squared) in its map.
"""
tmp=np.zeros(self.info['dat_calib'].shape)
skymap.map2tod(self,tmp)
mask=(tmp==0)
prior=CutsCompact(self)
prior.cuts_from_array(mask)
prior.get_imap()
prior.tod2map(self,tmp)
prior.map=1.0/prior.map**2
return prior
def slice_with_copy(arr,ind):
    """Return a fresh copy of arr restricted to entries where boolean ind is True.

    1-d arrays are sliced directly; higher-dimensional arrays are sliced along
    axis 0 (only 2-d assignment is supported).  Non-ndarray inputs return None.
    """
    if not isinstance(arr,np.ndarray):
        return None #should not get here
    shape=arr.shape
    if len(shape)==1:
        out=np.zeros(ind.sum(),dtype=arr.dtype)
        print(out.shape)
        print(ind.sum())
        out[:]=arr[ind]
    else:
        newdims=np.append(np.sum(ind),shape[1:])
        print(newdims,newdims.dtype)
        out=np.zeros(newdims,dtype=arr.dtype)
        out[:,:]=arr[ind,:].copy()
    return out
class TodVec:
    """A collection of Tod objects plus helpers for map/TOD products.

    Provides combined coordinate limits, N^-1-weighted dot products summed
    over all TODs (and over MPI tasks when available), and right-hand-side
    construction for map-making.
    """
    def __init__(self):
        self.tods=[]    #list of Tod objects
        self.ntod=0     #number of TODs held
    def add_tod(self,tod):
        """Append a copy of tod, tagging it with its index in this vector."""
        self.tods.append(tod.copy())
        self.tods[-1].set_tag(self.ntod)
        self.ntod=self.ntod+1
    def lims(self):
        """Return [xmin,xmax,ymin,ymax] over all TODs, MPI-reduced if available."""
        if self.ntod==0:
            return None
        xmin,xmax,ymin,ymax=self.tods[0].lims()
        for i in range(1,self.ntod):
            x1,x2,y1,y2=self.tods[i].lims()
            xmin=min(x1,xmin)
            xmax=max(x2,xmax)
            ymin=min(y1,ymin)
            ymax=max(y2,ymax)
        if have_mpi:
            print('before reduction lims are ',[xmin,xmax,ymin,ymax])
            xmin=comm.allreduce(xmin,op=MPI.MIN)
            xmax=comm.allreduce(xmax,op=MPI.MAX)
            ymin=comm.allreduce(ymin,op=MPI.MIN)
            ymax=comm.allreduce(ymax,op=MPI.MAX)
            print('after reduction lims are ',[xmin,xmax,ymin,ymax])
        return [xmin,xmax,ymin,ymax]
    def set_pix(self,map):
        """Precompute and store pixel indices for map on every TOD."""
        for tod in self.tods:
            #ipix=map.get_pix(tod)
            #tod.info['ipix']=ipix
            tod.set_pix(map)
    def set_apix(self):
        for tod in self.tods:
            tod.set_apix()
    def dot_cached(self,mapset,mapset2=None):
        """Like dot(), but using per-map caches (see Mapset.get_caches)."""
        nthread=get_nthread()
        mapset2.get_caches()
        for i in range(self.ntod):
            tod=self.tods[i]
            tod.dot(mapset,mapset2)
        mapset2.clear_caches()
        if have_mpi:
            mapset2.mpi_reduce()
        return mapset2
    def get_nsamp(self,reduce=True):
        """Total sample count over all TODs (MPI-summed when reduce is True)."""
        tot=0
        for tod in self.tods:
            tot=tot+tod.get_nsamp()
        if reduce:
            if have_mpi:
                tot=comm.allreduce(tot)
        return tot
    def dot(self,mapset,mapset2=None,report_times=False,cache_maps=False):
        """Apply A^T N^-1 A summed over all TODs, returning the result mapset.

        mapset2 defaults to a cleared copy of mapset.  report_times also
        returns the per-TOD wall-clock times; cache_maps switches to the
        cached code path.
        """
        if mapset2 is None:
            mapset2=mapset.copy()
            mapset2.clear()
        if cache_maps:
            mapset2=self.dot_cached(mapset,mapset2)
            return mapset2
        times=np.zeros(self.ntod)
        tot_times=0
        #for tod in self.tods:
        for i in range(self.ntod):
            tod=self.tods[i]
            t1=time.time()
            mytimes=tod.dot(mapset,mapset2,True)
            t2=time.time()
            tot_times=tot_times+mytimes
            times[i]=t2-t1
        if have_mpi:
            mapset2.mpi_reduce()
        print(tot_times)
        if report_times:
            return mapset2,times
        else:
            return mapset2
    def make_rhs(self,mapset,do_clear=False):
        """Accumulate A^T N^-1 d over all TODs into mapset (the map-maker RHS)."""
        if do_clear:
            mapset.clear()
        for tod in self.tods:
            dat_filt=tod.apply_noise()
            for map in mapset.maps:
                map.tod2map(tod,dat_filt)
        if have_mpi:
            mapset.mpi_reduce()
def read_tod_from_fits_cbass(fname,dopol=False,lat=37.2314,lon=-118.2941,v34=True,nm20=False):
    """Read a C-BASS FITS file into a TOD info dictionary.

    Intensity data (average of I1/I2) is returned unless dopol is set, in
    which case Q/U (sign convention controlled by v34) are returned as a
    2-detector TOD and, when the qpoint library is available, per-sample
    polarization angles are computed from az/el.  lat/lon default to the
    C-BASS site.  nm20 applies bonus cuts read from HDU 3 when present.
    Returns the dict of arrays expected by Tod.
    """
    f=pyfits.open(fname)
    raw=f[1].data
    ra=raw['RA']
    dec=raw['DEC']
    flag=raw['FLAG']
    I=0.5*(raw['I1']+raw['I2'])
    mjd=raw['MJD']
    #convert MJD to unix-like ctime; assumes the file's epoch offsets
    #(2455977.5 etc.) are correct -- TODO confirm against the C-BASS docs
    tvec=(mjd-2455977.5+2400000.5)*86400+1329696000
    #(mjd-2455977.5)*86400+1329696000;
    dt=np.median(np.diff(tvec))
    dat={}
    dat['dx']=np.reshape(np.asarray(ra,dtype='float64'),[1,len(ra)])
    dat['dy']=np.reshape(np.asarray(dec,dtype='float64'),[1,len(dec)])
    dat['dt']=dt
    dat['ctime']=tvec
    if dopol:
        #polarization: treat Q and U as two "detectors" sharing the pointing
        dat['dx']=np.vstack([dat['dx'],dat['dx']])
        dat['dy']=np.vstack([dat['dy'],dat['dy']])
        Q=0.5*(raw['Q1']+raw['Q2'])
        U=0.5*(raw['U1']+raw['U2'])
        dat['dat_calib']=np.zeros([2,len(Q)])
        if v34: #We believe this is the correct sign convention for V34
            dat['dat_calib'][0,:]=-U
            dat['dat_calib'][1,:]=Q
        else:
            dat['dat_calib'][0,:]=Q
            dat['dat_calib'][1,:]=U
        az=raw['AZ']
        el=raw['EL']
        #JLS- changing default az/el to radians and not degrees in TOD
        dat['az']=az*np.pi/180
        dat['el']=el*np.pi/180
        #dat['AZ']=az
        #dat['EL']=el
        #dat['ctime']=tvec
        dat['mask']=np.zeros([2,len(Q)],dtype='int8')
        dat['mask'][0,:]=1-raw['FLAG']
        dat['mask'][1,:]=1-raw['FLAG']
        if have_qp:
            #compute per-sample parallactic/polarization angles with qpoint
            Q = qp.QPoint(accuracy='low', fast_math=True, mean_aber=True,num_threads=4)
            #q_bore = Q.azel2bore(dat['AZ'], dat['EL'], 0*dat['AZ'], 0*dat['AZ'], lon*np.pi/180, lat*np.pi/180, dat['ctime'])
            q_bore = Q.azel2bore(az,el, 0*az, 0*az, lon, lat, dat['ctime'])
            q_off = Q.det_offset(0.0,0.0,0.0)
            #ra, dec, sin2psi, cos2psi = Q.bore2radec(q_off, ctime, q_bore)
            ra, dec, sin2psi, cos2psi = Q.bore2radec(q_off, tvec, q_bore)
            tmp=np.arctan2(sin2psi,cos2psi)
            tmp=tmp-np.pi/2 #this seems to be needed to get these coordinates to line up with
            #the expected, in IAU convention I believe.  JLS Nov 12 2020
            #dat['twogamma_saved']=np.arctan2(sin2psi,cos2psi)
            dat['twogamma_saved']=np.vstack([tmp,tmp+np.pi/2])
            #print('pointing rms is ',np.std(ra*np.pi/180-dat['dx']),np.std(dec*np.pi/180-dat['dy']))
            dat['ra']=ra*np.pi/180
            dat['dec']=dec*np.pi/180
    else:
        dat['dat_calib']=np.zeros([1,len(I)])
        dat['dat_calib'][:]=I
        dat['mask']=np.zeros([1,len(I)],dtype='int8')
        dat['mask'][:]=1-raw['FLAG']
    dat['pixid']=[0]
    dat['fname']=fname
    if nm20:
        try:
            #kludget to read in bonus cuts, which should be in f[3]
            raw=f[3].data
            dat['nm20_start']=raw['START']
            dat['nm20_stop']=raw['END']
            #nm20=0*dat['flag']
            print(dat.keys())
            nm20=0*dat['mask']
            start=dat['nm20_start']
            stop=dat['nm20_stop']
            for i in range(len(start)):
                nm20[:,start[i]:stop[i]]=1
                #nm20[:,start[i]:stop[i]]=0
            dat['mask']=dat['mask']*nm20
        except:
            print('missing nm20 for ',fname)
    f.close()
    return dat
def read_tod_from_fits(fname,hdu=1,branch=None):
    """Read a MUSTANG-style FITS TOD into an info dictionary.

    The table in HDU hdu is reshaped from flat per-sample rows into
    [ndet,nsamp] arrays (pointing, data, optional elevation/mask).  branch,
    if given, is the RA branch cut in degrees: RA values above it are wrapped
    by -2*pi.  Per-scan calibration keywords are collected into 'calinfo'
    when present in the header.  Returns the dict of arrays expected by Tod.
    """
    f=pyfits.open(fname)
    raw=f[hdu].data
    #print 'sum of cut elements is ',np.sum(raw['UFNU']<9e5)
    try : #read in calinfo (per-scan beam volumes etc) if present
        calinfo={'calinfo':True}
        kwds=('scan','bunit','azimuth','elevatio','beameff','apereff','antgain','gainunc','bmaj','bmin','bpa','parang','beamvol','beamvunc')#for now just hardwired ones we want
        for kwd in kwds:
            calinfo[kwd]=f[hdu].header[kwd]
    except KeyError :
        print('WARNING - calinfo information not found in fits file header - to track JytoK etc you may need to reprocess the fits files using mustangidl > revision 932')
        calinfo['calinfo']=False
    pixid=raw['PIXID']
    dets=np.unique(pixid)
    ndet=len(dets)
    nsamp=len(pixid)/len(dets)
    if True:
        ff=180/np.pi
        xmin=raw['DX'].min()*ff
        xmax=raw['DX'].max()*ff
        ymin=raw['DY'].min()*ff
        ymax=raw['DY'].max()*ff
        print('nsamp and ndet are ',ndet,nsamp,len(pixid),' on ',fname, 'with lims ',xmin,xmax,ymin,ymax)
    else:
        print('nsamp and ndet are ',ndet,nsamp,len(pixid),' on ',fname)
    #print raw.names
    dat={}
    #this bit of odd gymnastics is because a straightforward reshape doesn't seem to leave the data in
    #memory-contiguous order, which causes problems down the road
    #also, float32 is a bit on the edge for pointing, so cast to float64
    dx=raw['DX']
    if not(branch is None):
        #wrap RA values past the branch cut so the scan is continuous
        bb=branch*np.pi/180.0
        dx[dx>bb]=dx[dx>bb]-2*np.pi
    #dat['dx']=np.zeros([ndet,nsamp],dtype=type(dx[0]))
    ndet=int(ndet)
    nsamp=int(nsamp)
    dat['dx']=np.zeros([ndet,nsamp],dtype='float64')
    dat['dx'][:]=np.reshape(dx,[ndet,nsamp])[:]
    dy=raw['DY']
    #dat['dy']=np.zeros([ndet,nsamp],dtype=type(dy[0]))
    dat['dy']=np.zeros([ndet,nsamp],dtype='float64')
    dat['dy'][:]=np.reshape(dy,[ndet,nsamp])[:]
    if 'ELEV' in raw.names:
        elev=raw['ELEV']*np.pi/180
        dat['elev']=np.zeros([ndet,nsamp],dtype='float64')
        dat['elev'][:]=np.reshape(elev,[ndet,nsamp])[:]
    tt=np.reshape(raw['TIME'],[ndet,nsamp])
    tt=tt[0,:]
    dt=np.median(np.diff(tt))
    dat['dt']=dt
    pixid=np.reshape(pixid,[ndet,nsamp])
    pixid=pixid[:,0]
    dat['pixid']=pixid
    dat_calib=raw['FNU']
    #print 'shapes are ',raw['FNU'].shape,raw['UFNU'].shape,np.mean(raw['UFNU']>9e5)
    #dat_calib[raw['UFNU']>9e5]=0.0
    #dat['dat_calib']=np.zeros([ndet,nsamp],dtype=type(dat_calib[0]))
    dat['dat_calib']=np.zeros([ndet,nsamp],dtype='float64') #go to double because why not
    dat_calib=np.reshape(dat_calib,[ndet,nsamp])
    dat['dat_calib'][:]=dat_calib[:]
    if np.sum(raw['UFNU']>9e5)>0:
        #samples with huge uncertainties are flagged; keep a per-sample mask
        dat['mask']=np.reshape(raw['UFNU']<9e5,dat['dat_calib'].shape)
        dat['mask_sum']=np.sum(dat['mask'],axis=0)
    #print 'cut frac is now ',np.mean(dat_calib==0)
    #print 'cut frac is now ',np.mean(dat['dat_calib']==0),dat['dat_calib'][0,0]
    dat['fname']=fname
    dat['calinfo']=calinfo
    f.close()
    return dat
def downsample_array_r2r(arr,fac):
    """Downsample each row of arr by ~fac via r2r FFT truncation.

    The transform is truncated to n/fac coefficients and inverted; the
    1/(2*(n-1)) factor normalizes the r2r round trip at the original length.
    """
    n=arr.shape[1]
    nkeep=int(n/fac)
    ft=mkfftw.fft_r2r(arr)
    return mkfftw.fft_r2r(ft[:,0:nkeep].copy())/(2*(n-1))
def downsample_vec_r2r(vec,fac):
    """1-d companion of downsample_array_r2r: truncate the r2r transform of
    vec to len(vec)/fac coefficients and invert, normalizing by 2*(n-1).
    """
    n=len(vec)
    nkeep=int(n/fac)
    ft=mkfftw.fft_r2r(vec)
    return mkfftw.fft_r2r(ft[0:nkeep].copy())/(2*(n-1))
def downsample_tod(dat,fac=10):
    """Downsample, in place, every array in a TOD dict by factor fac.

    2-d arrays are downsampled along axis 1, but only when their second
    dimension matches dat['dat_calib']; non-empty 1-d arrays are always
    downsampled.  Non-array entries (scalars, strings) raise inside the try
    and are left untouched.
    """
    ndata=dat['dat_calib'].shape[1]
    #(removed an unused "keys=dat.keys()" local)
    for key in dat.keys():
        try:
            if len(dat[key].shape)==1:
                if len(dat[key]):
                    dat[key]=downsample_vec_r2r(dat[key],fac)
            else:
                if dat[key].shape[1]==ndata:
                    dat[key]=downsample_array_r2r(dat[key],fac)
        except:
            #entries without a shape land here and are kept as-is
            pass
def truncate_tod(dat,primes=[2,3,5,7,11]):
    """Trim, in place, the time axis of every entry in a TOD dict so that
    n-1 factorizes into the given primes, keeping r2r FFTs fast.

    Arrays whose (last or only) dimension matches the current length are
    sliced down to the new length; everything else is left untouched.
    """
    n=dat['dat_calib'].shape[1]
    n_new=find_good_fft_lens(n-1,primes).max()+1
    if n_new<n:
        print('truncating from ',n,' to ',n_new)
    for key in dat.keys():
        try:
            dims=dat[key].shape
            if len(dims)==1:
                if dims[0]==n:
                    dat[key]=dat[key][:n_new].copy()
            else:
                if dims[1]==n:
                    dat[key]=dat[key][:,0:n_new].copy()
        except:
            #non-array entries land here and are kept unchanged
            pass
def todvec_from_files_octave(fnames):
    """Read each octave-struct file in fnames into a Tod and collect them
    in a new TodVec."""
    out=TodVec()
    for fname in fnames:
        out.add_tod(Tod(read_octave_struct(fname)))
    return out
def make_hits(todvec,map,do_weights=False):
    """Make a hits (or, with do_weights, a noise-weight) map matching map.

    For every TOD, ones (or per-detector noise weights) are projected into
    map space and accumulated.  A per-sample 'mask' entry in the TOD is
    honored when present, and the result is MPI-reduced when available.

    Raises whatever get_det_weights raised if do_weights is requested but no
    noise model is present (previously this printed a message and then
    crashed with a NameError on an undefined array).
    """
    hits=map.copy()
    try:
        #polarized maps get a PRECON polstate for the weights
        if map.npol>1:
            hits.set_polstate(map.poltag+'_PRECON')
    except:
        pass  #map types without polarization support land here
    hits.clear()
    for tod in todvec.tods:
        if do_weights:
            try:
                weights=tod.get_det_weights()
                sz=tod.get_data_dims()
                tmp=np.outer(weights,np.ones(sz[1]))
            except:
                print("error in making weight map. Detector weights requested, but do not appear to be present. Do you have a noise model?")
                raise  #fail loudly instead of falling through with tmp undefined
        else:
            tmp=np.ones(tod.get_data_dims())
        if 'mask' in tod.info:
            tmp=tmp*tod.info['mask']
        hits.tod2map(tod,tmp)
    if have_mpi:
        print('reducing hits')
        tot=hits.map.sum()
        print('starting with total hitcount ' + repr(tot))
        hits.mpi_reduce()
        tot=hits.map.sum()
        print('ending with total hitcount ' + repr(tot))
    return hits
def decimate(vec,nrep=1):
    """Halve the sampling of vec nrep times by averaging adjacent pairs.

    An odd-length input is trimmed by one trailing sample before each pass.
    """
    for _ in range(nrep):
        if len(vec)%2:
            vec=vec[:-1]
        vec=0.5*(vec[0::2]+vec[1::2])
    return vec
def plot_ps(vec,downsamp=0):
    """Plot the power spectrum of vec.

    NOTE(review): unfinished stub -- the r2r transform is computed but never
    squared, plotted, or returned, and downsamp is unused.
    """
    vecft=mkfftw.fft_r2r(vec)
def get_wcs(lims,pixsize,proj='CAR',cosdec=None,ref_equ=False):
    """Build a WCS covering lims=[ra_min,ra_max,dec_min,dec_max] (radians)
    with the given pixel size (radians).

    Only the CAR projection is implemented; any other proj prints a warning
    and returns None.  ref_equ anchors the reference value at (0,0) with the
    reference pixel shifted so the requested corner really is the corner.
    """
    w=wcs.WCS(naxis=2)
    if cosdec is None:
        cosdec=np.cos(0.5*(lims[2]+lims[3]))
    if proj=='CAR':
        #CAR in FITS seems to already correct for cos(dec), which has me
        #confused, but whatever...
        cosdec=1.0
        if ref_equ:
            w.wcs.crval=[0.0,0.0]
            #this seems to be a needed hack if you want the position sent
            #in for the corner to actually be the corner.
            w.wcs.crpix=[lims[1]/pixsize+1,-lims[2]/pixsize+1]
        else:
            w.wcs.crpix=[1.0,1.0]
            w.wcs.crval=[lims[1]*180/np.pi,lims[2]*180/np.pi]
        w.wcs.cdelt=[-pixsize/cosdec*180/np.pi,pixsize*180/np.pi]
        w.wcs.ctype=['RA---CAR','DEC--CAR']
        return w
    print('unknown projection type ',proj,' in get_wcs.')
    return None
def get_aligned_map_subregion_car(lims,fname=None,big_wcs=None,osamp=1):
    """Get a wcs for a subregion of a map, with optionally finer pixellization.

    Designed for use in e.g. combining ACT maps and Mustang data.  Input
    arguments are RA/Dec limits (radians) for the subregion (which will be
    tweaked as needed to land on pixel edges of the parent map) and either a
    WCS structure (big_wcs) or the name of a FITS file (fname) whose WCS the
    sub-region will be aligned with.  osamp>1 subdivides each parent pixel.

    Returns (small_wcs, lims_use_in_radians, map_corner_pixel_indices).
    Raises ValueError if neither fname nor big_wcs is supplied (previously
    this only printed a message and then crashed inside wcs.WCS(None)).
    """
    if big_wcs is None:
        if fname is None:
            raise ValueError("Error in get_aligned_map_subregion_car. Must send in either a file or a WCS.")
        big_wcs=wcs.WCS(fname)
    ll=np.asarray(lims)
    ll=ll*180/np.pi
    #get the ra/dec limits in big pixel coordinates
    corner1=big_wcs.wcs_world2pix(ll[0],ll[2],0)
    corner2=big_wcs.wcs_world2pix(ll[1],ll[3],0)
    #get the pixel edges for the corners.  FITS works in
    #pixel centers, so edges are a half-pixel off
    corner1[0]=np.ceil(corner1[0])+0.5
    corner1[1]=np.floor(corner1[1])-0.5
    corner2[0]=np.floor(corner2[0])-0.5
    corner2[1]=np.ceil(corner2[1])+0.5
    corner1_radec=big_wcs.wcs_pix2world(corner1[0],corner1[1],0)
    corner2_radec=big_wcs.wcs_pix2world(corner2[0],corner2[1],0)
    dra=(corner1_radec[0]-corner2_radec[0])/(corner1[0]-corner2[0])
    ddec=(corner1_radec[1]-corner2_radec[1])/(corner1[1]-corner2[1])
    #we are not currently smart enough to deal with rectangular pixels.
    #(the old check, abs(dra/ddec)-1<1e-5, could never catch ratios below 1)
    assert(np.abs(np.abs(dra/ddec)-1)<1e-5)
    lims_use=np.asarray([corner1_radec[0],corner2_radec[0],corner1_radec[1],corner2_radec[1]])
    pixsize=ddec/osamp
    #pull the corners in by half a (fine) pixel so pixel centers line up
    lims_use=lims_use+np.asarray([0.5,-0.5,0.5,-0.5])*pixsize
    small_wcs=get_wcs(lims_use*np.pi/180,pixsize*np.pi/180,ref_equ=True)
    imin=int(np.round(corner2[0]+0.5))
    jmin=int(np.round(corner1[1]+0.5))
    map_corner=np.asarray([imin,jmin],dtype='int')
    lims_use=lims_use*np.pi/180
    return small_wcs,lims_use,map_corner
def fit_linear_ps_uncorr(dat,vecs,tol=1e-3,guess=None,max_iter=15):
    """Fit amplitudes of the spectral templates in vecs to dat**2 by Newton
    iteration on the Gaussian likelihood, assuming uncorrelated modes.

    dat holds (real-FFT) mode amplitudes; vecs is an [ntemplate,nmode]
    template matrix.  Returns the best-fit template amplitudes, stopping when
    the fractional step drops below tol or after max_iter iterations.
    """
    if guess is None:
        #start from the least-squares amplitudes, scaled down since we're
        #less likely to run into convergence issues if we start low
        lhs=np.dot(vecs,vecs.transpose())
        rhs=np.dot(vecs,dat**2)
        guess=np.dot(np.linalg.inv(lhs),rhs)
        guess=0.5*guess
    fitp=guess.copy()
    ntemp=len(fitp)
    grad_tr=np.zeros(ntemp)
    grad_chi=np.zeros(ntemp)
    curve=np.zeros([ntemp,ntemp])
    datsqr=dat*dat
    done=False
    iter=0
    while not done:
        iter=iter+1
        C=np.dot(fitp,vecs)
        Cinv=1.0/C
        for i in range(ntemp):
            grad_chi[i]=0.5*np.sum(datsqr*vecs[i,:]*Cinv*Cinv)
            grad_tr[i]=-0.5*np.sum(vecs[i,:]*Cinv)
            for j in range(i,ntemp):
                #exact curvature (data-only and Fisher variants were tried too)
                curve[i,j]=0.5*np.sum(Cinv*Cinv*vecs[i,:]*vecs[j,:])-np.sum(datsqr*Cinv*Cinv*Cinv*vecs[i,:]*vecs[j,:])
                curve[j,i]=curve[i,j]
        grad=grad_chi+grad_tr
        curve_inv=np.linalg.inv(curve)
        errs=np.diag(curve_inv)
        dp=np.dot(grad,curve_inv)
        fitp=fitp-dp
        frac_shift=dp/errs
        if np.max(np.abs(frac_shift))<tol:
            print('successful convergence after ',iter,' iterations with error estimate ',np.max(np.abs(frac_shift)))
            done=True
            print(C[0],C[-1])
        if iter==max_iter:
            print('not converging after ',iter,' iterations in fit_linear_ps_uncorr with current convergence parameter ',np.max(np.abs(frac_shift)))
            done=True
    return fitp
def get_curve_deriv_powspec(fitp,nu_scale,lognu,datsqr,vecs):
    """Likelihood, gradient, and curvature for a white + power-law spectrum.

    Model: C(nu) = fitp[0] + fitp[1]*nu_scale**fitp[2].  datsqr is the
    squared data spectrum, lognu is log(nu_scale), and vecs is a preallocated
    [3,n] scratch array whose first row must already be ones; rows 1 and 2
    are overwritten with the model derivatives.  Returns
    (log-likelihood, gradient, curvature, model C).
    """
    vec=nu_scale**fitp[2]
    C=fitp[0]+fitp[1]*vec
    Cinv=1.0/C
    #dC/dfitp[1] and dC/dfitp[2]; row 0 (dC/dfitp[0]) is the ones row
    vecs[1,:]=vec
    vecs[2,:]=fitp[1]*lognu*vec
    grad_chi=0.5*np.dot(vecs,datsqr*Cinv*Cinv)
    grad_tr=-0.5*np.dot(vecs,Cinv)
    grad=grad_chi+grad_tr
    #was "np=len(grad_chi)", which shadowed numpy and made every np.* call
    #below raise AttributeError
    npar=len(grad_chi)
    curve=np.zeros([npar,npar])
    for i in range(npar):
        for j in range(i,npar):
            curve[i,j]=0.5*np.sum(Cinv*Cinv*vecs[i,:]*vecs[j,:])-np.sum(datsqr*Cinv*Cinv*Cinv*vecs[i,:]*vecs[j,:])
            curve[j,i]=curve[i,j]
    like=-0.5*np.sum(datsqr*Cinv)-0.5*np.sum(np.log(C))
    return like,grad,curve,C
def fit_ts_ps(dat,dt=1.0,ind=-1.5,nu_min=0.0,nu_max=np.inf,scale_fac=1.0,tol=0.01):
    """Fit a white + 1/f power-law model to a timestream's power spectrum.

    First fits the two amplitudes at fixed spectral index ind with
    fit_linear_ps_uncorr, picks a pivot frequency near the knee, then refines
    all three parameters (white level, 1/f amplitude at the pivot, index)
    with a Levenberg-Marquardt loop driven by get_curve_deriv_powspec.  Only
    frequencies in (nu_min,nu_max) are used; scale_fac only affects a dead
    debug branch.  Returns (fitp, datsqr, C): best-fit parameters, squared
    spectrum, and the model at the accepted fit.
    """
    datft=mkfftw.fft_r2r(dat)
    n=len(datft)
    dnu=0.5/(len(dat)*dt) #coefficient should reflect the type of fft you did...
    nu=dnu*np.arange(n)
    isgood=(nu>nu_min)&(nu<nu_max)
    datft=datft[isgood]
    nu=nu[isgood]
    n=len(nu)
    vecs=np.zeros([2,n])
    vecs[0,:]=1.0 #white noise
    vecs[1,:]=nu**ind
    guess=fit_linear_ps_uncorr(datft,vecs)
    pred=np.dot(guess,vecs)
    #pred=guess[0]*vecs[0]+guess[1]*vecs[1]
    #return pred
    rat=vecs[1,:]*guess[1]/(vecs[0,:]*guess[0])
    #print 'rat lims are ',rat.max(),rat.min()
    my_ind=np.max(np.where(rat>1)[0])
    nu_ref=np.sqrt(nu[my_ind]*nu[0]) #WAG as to a sensible frequency pivot point
    #nu_ref=0.2*nu[my_ind] #WAG as to a sensible frequency pivot point
    #print 'knee is roughly at ',nu[my_ind],nu_ref
    #model = guess[1]*nu^ind+guess[0]
    #      = guess[1]*(nu/nu_ref*nu_ref)^ind+guess[0]
    #      = guess[1]*(nu_ref)^in*(nu/nu_ref)^ind+guess[0]
    nu_scale=nu/nu_ref
    guess_scale=guess.copy()
    guess_scale[1]=guess[1]*(nu_ref**ind)
    #print 'guess is ',guess
    #print 'guess_scale is ',guess_scale
    C_scale=guess_scale[0]+guess_scale[1]*(nu_scale**ind)
    fitp=np.zeros(3)
    fitp[0:2]=guess_scale
    fitp[2]=ind
    npp=3
    vecs=np.zeros([npp,n])
    vecs[0,:]=1.0
    lognu=np.log(nu_scale)
    curve=np.zeros([npp,npp])
    grad_chi=np.zeros(npp)
    grad_tr=np.zeros(npp)
    datsqr=datft**2
    #for robustness, start with downscaling 1/f part
    fitp[1]=0.5*fitp[1]
    like,grad,curve,C=get_curve_deriv_powspec(fitp,nu_scale,lognu,datsqr,vecs)
    lamda=0.0
    #print 'starting likelihood is',like
    for iter in range(50):
        #Levenberg-Marquardt step: damp the diagonal, accept if the
        #likelihood improves, otherwise crank up lamda and retry
        tmp=curve+lamda*np.diag(np.diag(curve))
        curve_inv=np.linalg.inv(tmp)
        dp=np.dot(grad,curve_inv)
        trial_fitp=fitp-dp
        errs=np.sqrt(-np.diag(curve_inv))
        frac=dp/errs
        new_like,new_grad,new_curve,C=get_curve_deriv_powspec(trial_fitp,nu_scale,lognu,datsqr,vecs)
        if (new_like>like):
        #if True:
            like=new_like
            grad=new_grad
            curve=new_curve
            fitp=trial_fitp
            lamda=update_lamda(lamda,True)
        else:
            lamda=update_lamda(lamda,False)
        if (lamda==0)&(np.max(np.abs(frac))<tol):
            converged=True
        else:
            converged=False
        #NOTE(review): dead debug branch below; it also references a broken
        #"np" loop bound and would not run as written
        if False:
            vec=nu_scale**fitp[2]
            C=fitp[0]+fitp[1]*vec
            Cinv=1.0/C
            vecs[1,:]=vec
            vecs[2,:]=fitp[1]*lognu*vec
            like=-0.5*np.sum(datsqr*Cinv)-0.5*np.sum(np.log(C))
            for i in range(np):
                grad_chi[i]=0.5*np.sum(datsqr*vecs[i,:]*Cinv*Cinv)
                grad_tr[i]=-0.5*np.sum(vecs[i,:]*Cinv)
                for j in range(i,np):
                    curve[i,j]=0.5*np.sum(Cinv*Cinv*vecs[i,:]*vecs[j,:])-np.sum(datsqr*Cinv*Cinv*Cinv*vecs[i,:]*vecs[j,:])
                    curve[j,i]=curve[i,j]
            grad=grad_chi+grad_tr
            curve_inv=np.linalg.inv(curve)
            errs=np.diag(curve_inv)
            dp=np.dot(grad,curve_inv)
            fitp=fitp-dp*scale_fac
            frac_shift=dp/errs
        #print fitp,errs,frac_shift,np.mean(np.abs(new_grad-grad))
        #print fitp,grad,frac,lamda
        if converged:
            print('converged after ',iter,' iterations')
            break
    #C=np.dot(guess,vecs)
    print('mean diff is ',np.mean(np.abs(C_scale-C)))
    #return datft,vecs,nu,C
    return fitp,datsqr,C
def get_derivs_tod_isosrc(pars,tod,niso=None):
    """Derivatives and model prediction for an isothermal-beta cluster plus
    Gaussian point sources evaluated on one TOD.

    pars packs np_iso=5 cluster parameters followed by np_src=4 parameters
    per source.  Returns (derivs[npar,nsamp], predicted timestream).
    """
    np_src=4
    np_iso=5
    npp=len(pars)
    #integer division: true division returned floats under python 3 and
    #made range(nsrc) raise TypeError
    if niso is None:
        niso=(npp%np_src)//(np_iso-np_src)
    nsrc=(npp-niso*np_iso)//np_src
    fitp_iso=np.zeros(np_iso)
    fitp_iso[:]=pars[:np_iso]
    derivs_iso,f_iso=derivs_from_isobeta_c(fitp_iso,tod)
    nn=tod.get_nsamp()
    derivs=np.reshape(derivs_iso,[np_iso,nn])
    pred=f_iso
    for ii in range(nsrc):
        fitp_src=np.zeros(np_src)
        istart=np_iso+ii*np_src
        fitp_src[:]=pars[istart:istart+np_src]
        derivs_src,f_src=derivs_from_gauss_c(fitp_src,tod)
        pred=pred+f_src
        derivs_src_tmp=np.reshape(derivs_src,[np_src,nn])
        derivs=np.append(derivs,derivs_src_tmp,axis=0)
    return derivs,pred
def get_curve_deriv_tod_manygauss(pars,tod,return_vecs=False):
    """Curvature, gradient, and chi^2 for a sum of Gaussian sources on one TOD.

    pars holds 4 parameters per source.  Endpoint samples (columns 0 and -1)
    are given half weight in all sums, matching the r2r transform convention
    used by the noise filtering.  Returns (curve, deriv, chisq);
    return_vecs is accepted but currently unused.
    """
    npp=4
    nsrc=len(pars)//npp
    fitp_gauss=np.zeros(npp)
    #dat=tod.info['dat_calib']
    dat=tod.get_data()
    big_derivs=np.zeros([npp*nsrc,dat.shape[0],dat.shape[1]])
    pred=0
    curve=np.zeros([npp*nsrc,npp*nsrc])
    deriv=np.zeros([npp*nsrc])
    for i in range(nsrc):
        fitp_gauss[:]=pars[i*npp:(i+1)*npp]
        derivs,src_pred=derivs_from_gauss_c(fitp_gauss,tod)
        pred=pred+src_pred
        big_derivs[i*npp:(i+1)*npp,:,:]=derivs
    delt=dat-pred
    delt_filt=tod.apply_noise(delt)
    #half-weighted endpoints plus full-weight interior
    chisq=0.5*np.sum(delt[:,0]*delt_filt[:,0])
    chisq=chisq+0.5*np.sum(delt[:,-1]*delt_filt[:,-1])
    chisq=chisq+np.sum(delt[:,1:-1]*delt_filt[:,1:-1])
    for i in range(npp*nsrc):
        deriv_filt=tod.apply_noise(big_derivs[i,:,:])
        for j in range(i,npp*nsrc):
            curve[i,j]=curve[i,j]+0.5*np.sum(deriv_filt[:,0]*big_derivs[j,:,0])
            curve[i,j]=curve[i,j]+0.5*np.sum(deriv_filt[:,-1]*big_derivs[j,:,-1])
            curve[i,j]=curve[i,j]+np.sum(deriv_filt[:,1:-1]*big_derivs[j,:,1:-1])
            curve[j,i]=curve[i,j]
            #print i,j,curve[i,j]
        deriv[i]=deriv[i]+0.5*np.sum(deriv_filt[:,0]*delt[:,0])
        deriv[i]=deriv[i]+0.5*np.sum(deriv_filt[:,-1]*delt[:,-1])
        deriv[i]=deriv[i]+np.sum(deriv_filt[:,1:-1]*delt[:,1:-1])
    return curve,deriv,chisq
def get_curve_deriv_tod_isosrc(pars,tod,return_vecs=False):
    """Gradients and curvature for an isobeta cluster plus Gaussian sources
    on one TOD, with noise-filtered and raw derivative sets.

    pars packs np_iso=5 cluster parameters followed by np_src=4 per source.
    Returns (grad, grad2, curve), or with return_vecs also the derivative
    matrices and residuals.
    """
    np_src=4
    np_iso=5
    #integer division: a float nsrc made range(nsrc) fail under python 3
    nsrc=(len(pars)-np_iso)//np_src
    fitp_iso=np.zeros(np_iso)
    fitp_iso[:]=pars[:np_iso]
    derivs_iso,f_iso=derivs_from_isobeta_c(fitp_iso,tod)
    derivs_iso_filt=0*derivs_iso
    tmp=tod.get_empty(True)
    #get_nsamp() must be called; the bare method object broke np.reshape
    nn=tod.get_nsamp()
    for i in range(np_iso):
        tmp[:,:]=derivs_iso[i,:,:]
        derivs_iso_filt[i,:,:]=tod.apply_noise(tmp)
    derivs=np.reshape(derivs_iso,[np_iso,nn])
    derivs_filt=np.reshape(derivs_iso_filt,[np_iso,nn])
    pred=f_iso
    for ii in range(nsrc):
        fitp_src=np.zeros(np_src)
        istart=np_iso+ii*np_src
        fitp_src[:]=pars[istart:istart+np_src]
        derivs_src,f_src=derivs_from_gauss_c(fitp_src,tod)
        pred=pred+f_src
        derivs_src_filt=0*derivs_src
        for i in range(np_src):
            tmp[:,:]=derivs_src[i,:,:]
            derivs_src_filt[i,:,:]=tod.apply_noise(tmp)
        derivs_src_tmp=np.reshape(derivs_src,[np_src,nn])
        derivs=np.append(derivs,derivs_src_tmp,axis=0)
        derivs_src_tmp=np.reshape(derivs_src_filt,[np_src,nn])
        derivs_filt=np.append(derivs_filt,derivs_src_tmp,axis=0)
    delt_filt=tod.apply_noise(tod.get_data()-pred)
    delt_filt=np.reshape(delt_filt,nn)
    dvec=np.ravel(tod.get_data())
    predvec=np.ravel(pred)
    delt=dvec-predvec
    grad=np.dot(derivs_filt,delt)
    grad2=np.dot(derivs,delt_filt)
    curve=np.dot(derivs_filt,derivs.transpose())
    if return_vecs:
        return grad,grad2,curve,derivs,derivs_filt,delt,delt_filt
    else:
        return grad,grad2,curve
def get_timestream_chisq_from_func(func,pars,tods):
    """Total chi^2 of the model func(pars,tod) summed over all TODs in tods.

    Endpoint samples are down-weighted by 0.5, matching the r2r transform
    convention used by the noise filtering.
    """
    total=0.0
    for tod in tods.tods:
        _,pred=func(pars,tod)
        resid=tod.get_data()-pred
        resid_filt=tod.apply_noise(resid)
        resid_filt[:,0]=resid_filt[:,0]*0.5
        resid_filt[:,-1]=resid_filt[:,-1]*0.5
        total=total+np.sum(resid*resid_filt)
    return total
def get_timestream_chisq_curve_deriv_from_func(func,pars,tods,rotmat=None,*args,**kwargs):
    """Chi^2, gradient, and curvature of the model func(pars,tod) over tods.

    rotmat, if given, maps the fit parameters into the basis func expects.
    Filtered/unfiltered derivative products are averaged for the gradient,
    endpoint samples get half weight, the curvature is symmetrized, and all
    results are summed across MPI tasks when available.
    Returns (chisq, grad, curve).
    """
    chisq=0.0
    grad=0.0
    curve=0.0
    for tod in tods.tods:
        derivs,pred=func(pars,tod,*args,**kwargs)
        if not(rotmat is None):
            derivs=np.dot(rotmat.transpose(),derivs)
        #np.prod: np.product was removed in numpy 2.0
        derivs=np.reshape(derivs,[derivs.shape[0],np.prod(derivs.shape[1:])])
        derivs_filt=0*derivs
        sz=tod.get_data_dims()
        tmp=np.zeros(sz)
        npp=derivs.shape[0]
        nn=np.prod(derivs.shape[1:])
        delt=tod.get_data()-pred
        delt_filt=tod.apply_noise(delt)
        for i in range(npp):
            tmp[:,:]=np.reshape(derivs[i,:],sz)
            tmp_filt=tod.apply_noise(tmp)
            #half-weight the endpoint samples (r2r convention)
            tmp_filt[:,0]=tmp_filt[:,0]*0.5
            tmp_filt[:,-1]=tmp_filt[:,-1]*0.5
            derivs_filt[i,:]=np.reshape(tmp_filt,nn)
        delt=np.reshape(delt,nn)
        delt_filt=np.reshape(delt_filt,nn)
        grad1=np.dot(derivs,delt_filt)
        grad2=np.dot(derivs_filt,delt)
        #average the two gradient estimates to symmetrize noise weighting
        grad=grad+0.5*(grad1+grad2)
        curve=curve+np.dot(derivs,derivs_filt.transpose())
        chisq=chisq+np.dot(delt,delt_filt)
    curve=0.5*(curve+curve.transpose())
    if have_mpi:
        curve=comm.allreduce(curve)
        grad=comm.allreduce(grad)
        chisq=comm.allreduce(chisq)
    return chisq,grad,curve
def get_ts_derivs_many_funcs(tod,pars,npar_fun,funcs,func_args=None,*args,**kwargs):
    """Evaluate several model functions on one TOD, each consuming its own
    slice of pars (of length npar_fun[i]); stack their derivatives and sum
    their predictions.

    Returns (derivs[npar_total,ndet,ndat], predicted timestream).
    func_args is accepted for interface compatibility but unused.
    """
    ndet=tod.get_ndet()
    ndat=tod.get_ndata()
    npar=np.sum(np.asarray(npar_fun),dtype='int')
    derivs=np.zeros([npar,ndet,ndat])
    pred=0
    offset=0
    for i in range(len(funcs)):
        nfun=npar_fun[i]
        myderivs,mypred=funcs[i](pars[offset:offset+nfun].copy(),tod,*args,**kwargs)
        pred=pred+mypred
        derivs[offset:offset+nfun,:,:]=myderivs
        offset=offset+nfun
    return derivs,pred
#derivs,pred=funcs[i](pars,tod)
def get_ts_curve_derivs_many_funcs(todvec,pars,npar_fun,funcs,driver=get_ts_derivs_many_funcs,*args,**kwargs):
    """Chi^2, gradient, and curvature of a multi-component model over all TODs.

    driver evaluates the stacked derivatives and prediction for each TOD;
    derivatives are noise-filtered, products accumulated over TODs, and the
    totals summed across MPI tasks when available.
    Returns (chisq, grad, curve).
    """
    curve=0
    grad=0
    chisq=0
    for tod in todvec.tods:
        derivs,pred=driver(tod,pars,npar_fun,funcs,*args,**kwargs)
        npar=derivs.shape[0]
        ndet=derivs.shape[1]
        ndat=derivs.shape[2]
        #pred_filt=tod.apply_noise(pred)
        derivs_filt=np.empty(derivs.shape)
        for i in range(npar):
            derivs_filt[i,:,:]=tod.apply_noise(derivs[i,:,:])
        derivs=np.reshape(derivs,[npar,ndet*ndat])
        derivs_filt=np.reshape(derivs_filt,[npar,ndet*ndat])
        #delt=tod.info['dat_calib']-pred
        delt=tod.get_data()-pred
        delt_filt=tod.apply_noise(delt)
        chisq=chisq+np.sum(delt*delt_filt)
        delt=np.reshape(delt,ndet*ndat)
        #delt_filt=np.reshape(delt_filt,[1,ndet*ndat])
        grad=grad+np.dot(derivs_filt,delt.T)
        #grad2=grad2+np.dot(derivs,delt_filt.T)
        curve=curve+np.dot(derivs_filt,derivs.T)
    if have_mpi:
        chisq=comm.allreduce(chisq)
        grad=comm.allreduce(grad)
        curve=comm.allreduce(curve)
    return chisq,grad,curve
def update_lamda(lamda,success):
    """Levenberg-Marquardt damping update.

    After an accepted step, shrink lamda by sqrt(2) (snapping to 0 once it
    drops below 0.2); after a rejected step, double it (or restart at 1 from
    0).
    """
    if success:
        return 0 if lamda<0.2 else lamda/np.sqrt(2)
    return 1.0 if lamda==0.0 else 2.0*lamda
def invscale(mat,do_invsafe=False):
    """Invert mat after rescaling it to unit diagonal, undoing the scaling on
    the way out; this tames poorly conditioned matrices.

    Falls back to the pseudo-inverse when the inverse fails, or uses invsafe
    when do_invsafe is True.
    """
    scale=1/np.sqrt(abs(np.diag(mat)))
    #zero diagonal entries give infinite scale factors; clamp them
    scale[np.where(scale == np.inf)[0]] = 1e-10
    outer=np.outer(scale,scale)
    scaled=outer*mat
    if do_invsafe:
        return outer*invsafe(scaled)
    try:
        return outer*np.linalg.inv(scaled)
    except:
        return outer*np.linalg.pinv(scaled)
def _par_step(grad,curve,to_fit,lamda,flat_priors=None,return_full=False):
    """One damped Newton step: solve (curve + lamda*diag(curve)) step = grad.

    Boolean to_fit restricts the solve to the flagged parameters (frozen ones
    get zero step/error); None fits everything.  With return_full, also
    return per-parameter error estimates.  flat_priors is accepted for
    interface compatibility but unused.
    """
    damped=curve+lamda*np.diag(np.diag(curve))
    if to_fit is None:
        step=np.dot(invscale(damped,True),grad)
        errs=np.sqrt(np.diag(invscale(damped,True)))
    else:
        sub=damped[to_fit,:]
        sub=sub[:,to_fit]
        substep=np.dot(invscale(sub),grad[to_fit])
        step=np.zeros(len(to_fit))
        step[to_fit]=substep
        errs=np.zeros(len(to_fit))
        errs[to_fit]=np.sqrt(np.diag(invscale(sub,True)))
    if return_full:
        return step,errs
    return step
def fit_timestreams_with_derivs_manyfun(funcs,pars,npar_fun,tods,to_fit=None,to_scale=None,tol=1e-2,chitol=1e-4,maxiter=10,scale_facs=None,driver=get_ts_derivs_many_funcs, priors=None, prior_vals=None):
    """Levenberg-Marquardt fit of a multi-component timestream model.

    funcs/npar_fun/driver define the model as in
    get_ts_curve_derivs_many_funcs.  to_fit is a boolean mask of parameters
    to vary; priors/prior_vals optionally impose flat priors: parameters
    sitting exactly on a boundary are frozen for that iteration and stepped
    values are clamped back into range.  Converges when an accepted step
    improves chi^2 by less than chitol with zero damping, or gives up after
    maxiter iterations.  Returns (pars, chisq, curve, errs).
    to_scale, tol, and scale_facs are accepted but currently unused.
    NOTE(review): with maxiter<=0 the final _par_step would reference
    temp_to_fit before assignment.
    """
    lamda=0
    t1=time.time()
    chisq,grad,curve=get_ts_curve_derivs_many_funcs(tods,pars,npar_fun,funcs,driver=driver)
    t2=time.time()
    if myrank==0:
        print('starting chisq is ',chisq,' with ',t2-t1,' seconds to get curvature')
    if to_fit is None:
        #If to_fit is not already defined, define it an intialize it to true
        #we're going to use it to handle not stepping for flat priors
        to_fit = np.ones(len(pars),dtype='bool')
    for iter in range(maxiter):
        temp_to_fit = np.copy(to_fit) #Make a copy of to fit, so we can temporarily set values to false
        if np.any(priors):
            #first build a mask that will identify parameters with flat priors
            flat_mask = np.where((priors == 'flat'))[0]
            for flat_id in flat_mask:
                print(pars[flat_id])
                if (pars[flat_id] == prior_vals[flat_id][0]) or (pars[flat_id] == prior_vals[flat_id][1]):
                    #Check to see if we're at the boundry values, if so don't fit for this iter
                    temp_to_fit[flat_id] = False
            #Make the new step
            pars_new = pars + _par_step(grad, curve, temp_to_fit, lamda)
            #check to see if we're outside the range for the flat priors: if so, peg them
            print('old gamma: ', pars_new[flat_id])
            for flat_id in flat_mask:
                if (pars_new[flat_id] < prior_vals[flat_id][0]): pars_new[flat_id] = prior_vals[flat_id][0]
                elif (pars_new[flat_id] > prior_vals[flat_id][1]): pars_new[flat_id] = prior_vals[flat_id][1]
                print('new gamma: ',pars_new[flat_id])
        else:
            pars_new=pars+_par_step(grad,curve,to_fit,lamda)
        chisq_new,grad_new,curve_new=get_ts_curve_derivs_many_funcs(tods,pars_new,npar_fun,funcs,driver=driver)
        if chisq_new<chisq:
            #accepted step: keep the new state and relax the damping
            if myrank==0:
                print('accepting with delta_chisq ',chisq_new-chisq,' and lamda ',lamda,pars_new.shape)
                print(repr(pars_new))
            pars=pars_new
            curve=curve_new
            grad=grad_new
            lamda=update_lamda(lamda,True)
            if (chisq-chisq_new<chitol)&(lamda==0):
                step,errs=_par_step(grad,curve,temp_to_fit,lamda,return_full=True)
                return pars,chisq_new,curve_new,errs
            else:
                chisq=chisq_new
        else:
            #rejected step: crank up the damping and retry
            if myrank==0:
                print('rejecting with delta_chisq ',chisq_new-chisq,' and lamda ',lamda)
            lamda=update_lamda(lamda,False)
        sys.stdout.flush()
    if myrank==0:
        print("fit_timestreams_with_derivs_manyfun failed to converge after ",maxiter," iterations.")
    step,errs=_par_step(grad,curve,temp_to_fit,lamda,return_full=True)
    return pars,chisq,curve,errs
def fit_timestreams_with_derivs(func,pars,tods,to_fit=None,to_scale=None,tol=1e-2,chitol=1e-4,maxiter=10,scale_facs=None):
    """Levenberg-Marquardt fit of a single model function to timestreams.

    to_fit is an integer array: 1 marks independently floating parameters,
    values >1 mark groups that share a single fit amplitude; it is turned
    into a rotation matrix mapping fit parameters onto model parameters.
    Iterates damped Newton steps from
    get_timestream_chisq_curve_deriv_from_func until the maximum
    shift/error ratio drops below tol (with zero damping) or maxiter is
    reached.  Returns (pars, chisq).
    to_scale and scale_facs are accepted but currently unused.
    """
    if not(to_fit is None):
        #print 'working on creating rotmat'
        to_fit=np.asarray(to_fit,dtype='int64')
        inds=np.unique(to_fit)
        nfloat=np.sum(to_fit==1)
        ncovary=np.sum(inds>1)
        nfit=nfloat+ncovary
        #rotmat has one column per fit parameter; grouped parameters share
        #a column so they step together
        rotmat=np.zeros([len(pars),nfit])
        solo_inds=np.where(to_fit==1)[0]
        icur=0
        for ind in solo_inds:
            rotmat[ind,icur]=1.0
            icur=icur+1
        if ncovary>0:
            group_inds=inds[inds>1]
            for ind in group_inds:
                ii=np.where(to_fit==ind)[0]
                rotmat[ii,icur]=1.0
                icur=icur+1
    else:
        rotmat=None
    iter=0
    converged=False
    pp=pars.copy()
    lamda=0.0
    chi_ref,grad,curve=get_timestream_chisq_curve_deriv_from_func(func,pp,tods,rotmat)
    chi_cur=chi_ref
    iter=0
    while (converged==False) and (iter<maxiter):
        iter=iter+1
        curve_tmp=curve+lamda*np.diag(np.diag(curve))
        #curve_inv=np.linalg.inv(curve_tmp)
        curve_inv=invscale(curve_tmp)
        shifts=np.dot(curve_inv,grad)
        if not(rotmat is None):
            shifts_use=np.dot(rotmat,shifts)
        else:
            shifts_use=shifts
        pp_tmp=pp+shifts_use
        chi_new=get_timestream_chisq_from_func(func,pp_tmp,tods)
        if chi_new<=chi_cur+chitol: #add in a bit of extra tolerance in chi^2 in case we're bopping about the minimum
            success=True
        else:
            success=False
        if success:
            pp=pp_tmp
            chi_cur=chi_new
            chi_tmp,grad,curve=get_timestream_chisq_curve_deriv_from_func(func,pp,tods,rotmat)
        lamda=update_lamda(lamda,success)
        if (lamda==0)&success:
            errs=np.sqrt(np.diag(curve_inv))
            conv_fac=np.max(np.abs(shifts/errs))
            if (conv_fac<tol):
                print('we have converged')
                converged=True
        else:
            conv_fac=None
        #NOTE(review): to_print is computed but never printed
        to_print=np.asarray([3600*180.0/np.pi,3600*180.0/np.pi,3600*180.0/np.pi,1.0,1.0,3600*180.0/np.pi,3600*180.0/np.pi,3600*180.0/np.pi*np.sqrt(8*np.log(2)),1.0])*(pp-pars)
        print('iter',iter,' max_shift is ',conv_fac,' with lamda ',lamda,chi_ref-chi_cur,chi_ref-chi_new)
    return pp,chi_cur
def split_dict(mydict, vec, thresh):
    """Split *mydict* into a list of sub-dictionaries wherever consecutive
    entries of *vec* are separated by more than *thresh*.

    Useful e.g. for splitting TODs where a cut has left a large time gap.
    Array-valued entries whose length (1-d) or second/first axis (2-d)
    matches len(vec) are sliced per segment; everything else is carried
    into every sub-dictionary unchanged.
    """
    gaps = np.where(np.diff(vec) > thresh)[0]
    if len(gaps) == 0:
        return [mydict]
    nseg = len(gaps) + 1
    # segment boundaries: [0, gap+1..., len(vec)]
    edges = np.hstack([[0], gaps + 1, [len(vec)]])
    pieces = [{} for _ in range(nseg)]
    for key, val in mydict.items():
        # default: every segment sees the original object
        for piece in pieces:
            piece[key] = val
        try:
            shape = val.shape
            if len(shape) == 1:
                if shape[0] == len(vec):
                    for i in range(nseg):
                        pieces[i][key] = val[edges[i]:edges[i + 1]].copy()
            elif len(shape) == 2:
                if shape[1] == len(vec):
                    for i in range(nseg):
                        pieces[i][key] = val[:, edges[i]:edges[i + 1]].copy()
                elif shape[0] == len(vec):
                    for i in range(nseg):
                        pieces[i][key] = val[edges[i]:edges[i + 1], :].copy()
        except:
            # non-array entries (no .shape) were already copied unchanged
            continue
    return pieces
def mask_dict(mydict, mask):
    """Apply *mask* in place to every array-valued entry of *mydict*.

    Any axis of a 1-, 2-, or 3-d array whose length equals len(mask) is
    masked; entries without a .shape attribute are left untouched (the
    broad except below deliberately skips them).
    """
    for key in mydict.keys():
        tmp = mydict[key]
        try:
            dims = tmp.shape
            ndim = len(dims)
            if ndim == 1:
                if dims[0] == len(mask):
                    tmp = tmp[mask]
                    mydict[key] = tmp
            if ndim == 2:
                if dims[0] == len(mask):
                    tmp = tmp[mask, :]
                if dims[1] == len(mask):
                    tmp = tmp[:, mask]
                mydict[key] = tmp
            if ndim == 3:
                if dims[0] == len(mask):
                    tmp = tmp[mask, :, :]
                if dims[1] == len(mask):
                    tmp = tmp[:, mask, :]
                # bugfix: was `len(maks)` -- the resulting NameError was
                # silently swallowed by the bare except, so masking along
                # the third axis never happened.
                if dims[2] == len(mask):
                    tmp = tmp[:, :, mask]
                mydict[key] = tmp
        except:
            # non-array entry (no .shape): leave it as-is
            continue
| 187,717 | 34.674268 | 578 | py |
minkasi | minkasi-master/minkasi/mkfftw.py | import os
import numpy
import ctypes
import time
# Load the compiled FFTW wrapper; fall back to the copy shipped next to this
# file when libmkfftw.so is not on the default library search path.
try:
    mylib=ctypes.cdll.LoadLibrary("libmkfftw.so")
except OSError:
    mylib=ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(os.path.abspath(__file__)), "libmkfftw.so"))
# Batched 1-d real<->complex transforms ("fftf" variants are single precision).
# Arguments are raw pointers plus integer sizes; see mkfftw C source for the
# exact meaning of the three trailing ints.
many_fft_r2c_1d_c=mylib.many_fft_r2c_1d
many_fft_r2c_1d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int,ctypes.c_int]
many_fftf_r2c_1d_c=mylib.many_fftf_r2c_1d
many_fftf_r2c_1d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int,ctypes.c_int]
many_fft_c2r_1d_c=mylib.many_fft_c2r_1d
many_fft_c2r_1d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int,ctypes.c_int]
many_fftf_c2r_1d_c=mylib.many_fftf_c2r_1d
many_fftf_c2r_1d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int,ctypes.c_int]
# Real-to-real transforms (kind selects the FFTW r2r transform type).
fft_r2r_1d_c=mylib.fft_r2r_1d
fft_r2r_1d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int]
many_fft_r2r_1d_c=mylib.many_fft_r2r_1d
many_fft_r2r_1d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int]
many_fftf_r2r_1d_c=mylib.many_fftf_r2r_1d
many_fftf_r2r_1d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_int,ctypes.c_int]
# n-dimensional and 3-d real<->complex transforms.
fft_r2c_n_c=mylib.fft_r2c_n
fft_r2c_n_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_void_p]
fft_c2r_n_c=mylib.fft_c2r_n
fft_c2r_n_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_int,ctypes.c_void_p]
fft_r2c_3d_c=mylib.fft_r2c_3d
fft_r2c_3d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p]
fft_c2r_3d_c=mylib.fft_c2r_3d
fft_c2r_3d_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p,ctypes.c_void_p]
# Threading control and FFTW wisdom (plan cache) I/O.
set_threaded_c=mylib.set_threaded
set_threaded_c.argtypes=[ctypes.c_int]
read_wisdom_c=mylib.read_wisdom
read_wisdom_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p]
write_wisdom_c=mylib.write_wisdom
write_wisdom_c.argtypes=[ctypes.c_void_p,ctypes.c_void_p]
def set_threaded(n=-1):
    """Set the FFTW thread count via the C wrapper.

    NOTE(review): the semantics of n=-1 (presumably "use all available
    threads") live in the C side -- confirm against the mkfftw C source.
    """
    set_threaded_c(n)
def rfftn(dat):
    """n-dimensional real-to-complex FFT via the compiled FFTW wrapper.

    The output has the input's shape except the last axis, which is
    n//2+1 long (the standard r2c layout).  Assumes *dat* is a contiguous
    float64 array -- TODO confirm; contiguity/dtype are not checked here.
    """
    myshape=dat.shape
    myshape=numpy.asarray(myshape,dtype='int32')
    myshape2=myshape.copy()
    myshape2[-1]=(myshape2[-1]//2+1)  # r2c output length along last axis
    datft=numpy.zeros(myshape2,dtype='complex')
    fft_r2c_n_c(dat.ctypes.data,datft.ctypes.data,len(myshape),myshape.ctypes.data)
    return datft
def irfftn(datft,iseven=True,preserve_input=True):
    """Inverse of rfftn.

    *iseven* selects whether the original real array's last axis was even
    (length 2*(m-1)) or odd (length 2*m-1), since that information is lost
    in the r2c layout.
    """
    #the c2r transforms destroy input. if you want to keep the input
    #around, then we need to copy the incoming data.
    if preserve_input:
        datft=datft.copy()
    myshape=datft.shape
    myshape=numpy.asarray(myshape,dtype='int32')
    myshape2=myshape.copy()
    if iseven:
        myshape2[-1]=2*(myshape2[-1]-1)
    else:
        myshape2[-1]=2*myshape2[-1]-1
    #print(myshape2)
    dat=numpy.empty(myshape2,dtype='float64')
    fft_c2r_n_c(datft.ctypes.data,dat.ctypes.data,len(myshape2),myshape2.ctypes.data)
    return dat
def fft_r2c_3d(dat):
    """3-d real-to-complex FFT of *dat* via the compiled wrapper.

    Output shape equals the input's except the last axis (n3//2+1).
    NOTE(review): dtype='int' is platform-dependent (int32 vs int64) while
    rfftn above uses 'int32' -- confirm which width the C side expects.
    """
    myshape=dat.shape
    assert(len(myshape)==3)
    myshape=numpy.asarray(myshape,dtype='int')
    myshape2=myshape.copy()
    myshape2[-1]=(myshape2[-1]//2+1)
    datft=numpy.zeros(myshape2,dtype='complex')
    fft_r2c_3d_c(dat.ctypes.data,datft.ctypes.data,myshape.ctypes.data)
    return datft
def fft_c2r_3d(datft,iseven=True,preserve_input=True):
    """Inverse of fft_r2c_3d; *iseven* gives the parity of the original
    real last-axis length (lost in the r2c layout)."""
    #the c2r transforms destroy input. if you want to keep the input
    #around, then we need to copy the incoming data.
    if preserve_input:
        datft=datft.copy()
    myshape=datft.shape
    assert(len(myshape)==3)
    # NOTE(review): dtype='int' is platform-dependent; see fft_r2c_3d.
    myshape=numpy.asarray(myshape,dtype='int')
    myshape2=myshape.copy()
    if iseven:
        myshape2[-1]=2*(myshape2[-1]-1)
    else:
        myshape2[-1]=2*myshape2[-1]-1
    #print(myshape2)
    dat=numpy.empty(myshape2,dtype='float64')
    fft_c2r_3d_c(datft.ctypes.data,dat.ctypes.data,myshape2.ctypes.data)
    return dat
def fft_r2c(dat):
    """Batched 1-d real-to-complex FFT over the rows of a 2-d array.

    Dispatches on dtype: float64 -> complex128, float32 -> complex64.
    NOTE(review): the output is allocated with the full row length rather
    than the usual n//2+1 r2c size; presumably the C side fills a
    full-length buffer -- confirm against the mkfftw C source.
    """
    ndat=dat.shape[1]
    ntrans=dat.shape[0]
    if dat.dtype==numpy.dtype('float64'):
        #datft=numpy.zeros(dat.shape,dtype=complex)
        datft=numpy.empty(dat.shape,dtype=complex)
        many_fft_r2c_1d_c(dat.ctypes.data,datft.ctypes.data,ntrans,ndat,ndat,ndat)
    else:
        assert(dat.dtype==numpy.dtype('float32'))
        datft=numpy.empty(dat.shape,dtype='complex64')
        many_fftf_r2c_1d_c(dat.ctypes.data,datft.ctypes.data,ntrans,ndat,ndat,ndat)
    return datft
def fft_c2r(datft):
    """Batched 1-d complex-to-real inverse FFT over the rows of a 2-d array.

    The division by ndat normalizes FFTW's unnormalized inverse transform.
    Dispatches on dtype: complex128 -> float64, complex64 -> float32.
    """
    ndat=datft.shape[1]
    ntrans=datft.shape[0]
    if datft.dtype==numpy.dtype('complex128'):
        dat=numpy.zeros(datft.shape)
        many_fft_c2r_1d_c(datft.ctypes.data,dat.ctypes.data,ntrans,ndat,ndat,ndat)
        dat=dat/ndat
    else:
        assert(datft.dtype==numpy.dtype('complex64'))
        dat=numpy.zeros(datft.shape,dtype='float32')
        many_fftf_c2r_1d_c(datft.ctypes.data,dat.ctypes.data,ntrans,ndat,ndat,ndat)
        dat=dat/numpy.float32(ndat)
    return dat
def fft_r2r_1d(dat,kind=1):
    """Single 1-d real-to-real transform.

    *kind* selects the transform type on the C side (presumably one of the
    FFTW_REDFT/RODFT r2r kind codes -- TODO confirm against mkfftw.c).
    """
    nn=dat.size
    trans=numpy.zeros(nn)
    fft_r2r_1d_c(dat.ctypes.data,trans.ctypes.data,nn,kind)
    return trans
def fft_r2r(dat,trans=None,kind=1):
    """Real-to-real transform of a 1-d array or, row-wise, of a 2-d array.

    A preallocated output buffer may be passed as *trans* to avoid an
    allocation; its dtype must match *dat* -- this is not checked.
    Single vs double precision is dispatched on the dtype of dat[0,0].
    """
    if len(dat.shape)==1:
        return fft_r2r_1d(dat,kind)
    ntrans=dat.shape[0]
    n=dat.shape[1]
    #trans=numpy.zeros([ntrans,n],dtype=type(dat[0,0]))
    if trans is None:
        trans=numpy.empty([ntrans,n],dtype=type(dat[0,0]))
    if type(dat[0,0])==numpy.dtype('float32'):
        #print 'first two element in python are ',dat[0,0],dat[0,1]
        many_fftf_r2r_1d_c(dat.ctypes.data,trans.ctypes.data,n,kind,ntrans)
    else:
        many_fft_r2r_1d_c(dat.ctypes.data,trans.ctypes.data,n,kind,ntrans)
    return trans
def read_wisdom(double_file='.fftw_wisdom',single_file='.fftwf_wisdom'):
    """Load FFTW planning wisdom for the double- and single-precision
    libraries from the given files (passed to C as NUL-terminated
    int8 buffers)."""
    def _cstr(name):
        # NUL-terminated int8 buffer holding the filename's character codes
        buf = numpy.zeros(len(name) + 1, dtype='int8')
        buf[0:-1] = [ord(ch) for ch in name]
        return buf
    dbuf = _cstr(double_file)
    sbuf = _cstr(single_file)
    read_wisdom_c(dbuf.ctypes.data, sbuf.ctypes.data)
def write_wisdom(double_file='.fftw_wisdom',single_file='.fftwf_wisdom'):
    """Save FFTW planning wisdom for the double- and single-precision
    libraries to the given files (passed to C as NUL-terminated
    int8 buffers)."""
    def _cstr(name):
        # NUL-terminated int8 buffer holding the filename's character codes
        buf = numpy.zeros(len(name) + 1, dtype='int8')
        buf[0:-1] = [ord(ch) for ch in name]
        return buf
    dbuf = _cstr(double_file)
    sbuf = _cstr(single_file)
    write_wisdom_c(dbuf.ctypes.data, sbuf.ctypes.data)
| 6,217 | 31.385417 | 113 | py |
minkasi | minkasi-master/minkasi/pyregion_tools.py | import pyregion
from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import copy
__all__ = ["region_binner",
"bootstrap"]
def bootstrap(data, n = 10000, statistic = np.mean):
    """
    Bootstraps data.
    Given an input data vector, the bootstrapping procedure selects a random
    subsample of data, with replacement, and computes a statistic on that
    subsample.  The variance of the resulting statistics is a good measure
    of the true variance of that statistic: if we have some data and compute
    statistic(data), bootstrapping estimates var(statistic).

    Parameters
    ----------
    data : numpy.array
        array of data which we wish to bootstrap
    n : int, optional
        number of instances of bootstrapping to perform
    statistic : callable, optional
        function applied to each resampled data set; defaults to np.mean,
        which preserves the original behavior (resolves the old TODO about
        supporting statistics other than the mean)

    Returns
    -------
    stats : numpy.array
        array of the statistic evaluated on each bootstrapped sample
    """
    data = np.asarray(data)
    stats = np.zeros(n)
    for i in range(n):
        # sample len(data) indices with replacement
        picks = np.random.randint(len(data), size=len(data))
        stats[i] = statistic(data[picks])
    return stats
def region_binner(shapeList, hdu, plot = False, return_rs = True):
    """
    Dispatch profile binning over a pyregion ShapeList.

    Computes the binned mean and bootstrap variance of the map in
    hdu[0].data over the regions described by shapeList.  A list with
    several shapes is treated as explicitly-defined bins; a single
    'epanda' shape is expanded into its own radial bins.  Any other
    single-shape input is reported as unsupported and None is returned.

    Parameters
    ----------
    shapeList : 'pyregion.core.ShapeList'
        pyregion ShapeList specifying the regions over which to compute
        the profile bins.
    hdu : 'astropy.io.fits.hdu.hdulist.HDUList'
        astropy HDUList with the relevant map at hdu[0].data.
    plot : Bool, optional
        whether to display the bin regions.
    return_rs : Bool, optional
        whether to also return bin radii (explicit-region path only).

    Returns
    -------
    means, var[, rs] : numpy arrays, as produced by the dispatched helper.
    """
    if len(shapeList) > 1:
        # several explicitly-defined regions of any type
        return _region_binner_explicit_regions(shapeList, hdu, plot=plot, return_rs=return_rs)
    if shapeList[0].name == 'epanda':
        # single epanda whose radial bin count is baked into its coordinates
        return _region_binner_epanda_rbins(shapeList, hdu, plot=plot)
    print('Error: region type {} not currently supported'.format(shapeList[0].name))
    return
def _region_binner_explicit_regions(shapeList, hdu, plot = False, return_rs = True):
    """
    Binning function for a ShapeList where each region is explicitly defined.

    Computes the average and (bootstrapped) variance of the map within each
    region.  Assumes no per-region radial/angular sub-bins, and that the
    caller knows what they are doing (overlapping regions are averaged
    without warning).

    Parameters
    ----------
    shapeList : 'pyregion.core.ShapeList'
        regions over which to compute the profile bins
    hdu : 'astropy.io.fits.hdu.hdulist.HDUList'
        astropy HDUList with the map at hdu[0].data
    plot : Bool, optional
        whether to display each masked region
    return_rs : Bool, optional
        whether to also compute and return region radii

    Returns
    -------
    means : np.array(float)
        simple average of data within each binning region
    var : np.array(float)
        variance within the same regions via pixel bootstrapping
    rs : np.array(float), only when return_rs and all regions are supported
        region radii (arcsec, via CDELT2) associated with means/var
    """
    means = np.zeros(len(shapeList))
    var = np.zeros(len(shapeList))
    filters = shapeList.get_filter()
    for i in range(len(var)):
        cur_filter = filters[i]
        tempmask = cur_filter.mask(hdu[0].data.shape)
        # Zero out the map outside the mask; would be cleaner with masked arrays
        masked_data = hdu[0].data * tempmask
        if plot:
            # NOTE(review): `plt` is never imported in this module, so
            # plot=True raises NameError -- needs `import matplotlib.pyplot as plt`
            plt.imshow(masked_data, origin='lower')
            plt.show()
            plt.close()
        # Masked pixels are merely zeroed, so extract the non-zero values
        vals = masked_data[np.abs(masked_data) > 1e-10]
        means[i] = np.mean(vals)
        var[i] = np.var(bootstrap(vals))
    if return_rs:
        rs = np.zeros(len(means))
        header = hdu[0].header
        cdelt = header.get('CDELT2', 1.0)  # degrees/pixel; default 1.0 if absent
        for i in range(len(rs)):
            if shapeList[i].name == 'epanda':
                # epanda: average of the inner/outer semimajor axes (indices 5, 7)
                rs[i] = np.mean([shapeList[i].coord_list[5], shapeList[i].coord_list[7]]) * cdelt * 3600
            elif shapeList[i].name == 'panda':
                # bugfix: panda coords are (ra, dec, ang_start, ang_end,
                # n_ang_bins, inner r, outer r, n_rad_bins), so the inner and
                # outer radii live at indices 5 and 6.  The old slice [4:5]
                # averaged only the angular-bin count.
                rs[i] = np.mean(shapeList[i].coord_list[5:7]) * cdelt * 3600
            else:
                print("Error: region type {} not supported for automatic radii generation".format(shapeList[i].name))
                # bail out without radii on the first unsupported region
                return means, var
        return means, var, rs
    return means, var
def _region_binner_epanda_rbins(shapeList, hdu, plot=False, return_rs = True):
    """
    Bin a single epanda region into its n radial bins.

    Each radial bin is realized by deep-copying the shape, narrowing its
    inner/outer semiaxes to one bin, and masking the map with it; mean and
    bootstrapped variance are computed per bin.

    Returns
    -------
    means, var[, rs] : np.array(float)
        per-bin mean, bootstrapped variance, and (optionally) bin-center
        semimajor radii.  NOTE(review): unlike the explicit-region path,
        radii here are NOT scaled by CDELT2*3600 -- confirm desired units.
    """
    #               0   1    2          3        4           5        6        7        8        9               10
    #Epanda format (ra, dec, ang_start, ang_end, n_ang_bins, inner a, Inner b, outer a, outer b, number of radial bins, PA)
    coords = shapeList[0].coord_list
    nbins = int(coords[-2])  # number of radial bins; cast guards against float coords
    step_a = (coords[7] - coords[5]) / nbins
    step_b = (coords[8] - coords[6]) / nbins
    means = np.zeros(nbins)
    var = np.zeros(nbins)
    if return_rs:
        rs = np.zeros(nbins)
    for i in range(nbins):
        temp_r = copy.deepcopy(shapeList)
        # inner_a/outer_a etc. in the original coord_list are for the region
        # as a whole; narrow them to the i-th radial bin
        inner_a = coords[5] + i * step_a
        outer_a = inner_a + step_a
        inner_b = coords[6] + i * step_b
        outer_b = inner_b + step_b
        temp_r[0].coord_list[5] = inner_a
        temp_r[0].coord_list[7] = outer_a
        temp_r[0].coord_list[6] = inner_b
        temp_r[0].coord_list[8] = outer_b
        temp_r[0].coord_list[-2] = 1
        tempfilter = temp_r.get_filter()
        tempmask = tempfilter.mask(hdu[0].data.shape)
        if plot:
            # NOTE(review): `plt` is never imported in this module, so
            # plot=True raises NameError -- needs `import matplotlib.pyplot as plt`
            plt.imshow(hdu[0].data * tempmask, origin='lower')
            plt.show()
            plt.close()
        masked_data = hdu[0].data * tempmask
        vals = masked_data[np.abs(masked_data) > 1e-8]
        means[i] = np.mean(vals)
        var[i] = np.var(bootstrap(vals))
        if return_rs:
            # bugfix: the bin-center radius is the bin's inner edge plus half
            # a step.  The old expression added i*step_a/2, so the reported
            # radii drifted outward with increasing i.
            rs[i] = inner_a + step_a / 2
    if return_rs:
        return means, var, rs
    return means, var
| 7,945 | 36.658768 | 140 | py |
minkasi | minkasi-master/minkasi/__init__.py | from .minkasi import *
| 23 | 11 | 22 | py |
minkasi | minkasi-master/minkasi/zernike.py | import numpy
def zernike_column(m, nmax, rmat):
    """Generate the radial part of the Zernike polynomials R_n^m for all n
    with the parity of m, from n=m up to nmax.

    Parameters
    ----------
    m : int
        azimuthal order (must satisfy m <= nmax after parity adjustment)
    nmax : int
        maximum radial order; dropped by one if its parity differs from m
    rmat : numpy.ndarray
        radial coordinates; entries with r > 1 are zeroed (outside the
        unit disk)

    Returns
    -------
    (zn, nn) : list of arrays (one per radial order) and the matching
        integer orders, or None when m > nmax.
    """
    if (m - nmax) % 2 != 0:
        # if parity is wrong, drop nmax by one; makes the external loop that
        # generates all Zerns much simpler
        nmax = nmax - 1
    if m > nmax:
        # py3 fix: this was a Python 2 print statement (SyntaxError on py3)
        print('m may not be larger than n')
        return None
    # py3 fix: use floor division so nm stays an integer under Python 3
    nm = (nmax - m) // 2 + 1
    mask = rmat > 1
    zn = [None] * nm
    nn = numpy.zeros(nm, dtype='int')
    zn[0] = rmat**m
    zn[0][mask] = 0
    nn[0] = m
    if nm == 1:
        return zn, nn
    rsqr = rmat**2
    zn[1] = ((m + 2) * rsqr - m - 1) * zn[0]
    zn[1][mask] = 0
    nn[1] = m + 2
    if nm == 2:
        return zn, nn
    ii = 2
    # three-term recurrence in n (step 2, fixed m)
    for n in range(m + 4, nmax + 1, 2):
        f1 = 2 * (n - 1) * (2 * n * (n - 2) * rsqr - m * m - n * (n - 2)) * zn[ii - 1]
        f2 = n * (n + m - 2) * (n - m - 2) * zn[ii - 2]
        f3 = 1.0 / ((n + m) * (n - m) * (n - 2))
        zn[ii] = (f1 - f2) * f3
        nn[ii] = n
        ii = ii + 1
    return zn, nn
def all_zernike(n, r, th):
    """Evaluate every Zernike polynomial up to radial order n on the grid
    given by radius r and azimuth th.

    Returns (zns, znvec): zns stacks each polynomial along a new leading
    axis (m>0 terms appear twice, paired with cos(m*th) and sin(m*th));
    znvec holds the raw radial columns per m.
    """
    radial = [None] * (n + 1)
    orders = [None] * (n + 1)
    azim = [None] * (n + 1)
    total = 0
    for m in range(n + 1):
        radial[m], orders[m] = zernike_column(m, n, r)
        azim[m] = 0 * orders[m] + m
        # m=0 terms appear once; m>0 terms contribute cos and sin copies
        total += len(radial[m]) if m == 0 else 2 * len(radial[m])
    out_shape = numpy.append(total, r.shape)
    zns = numpy.zeros(out_shape)
    row = 0
    for m in range(n + 1):
        cc = numpy.cos(m * th)
        ss = numpy.sin(m * th)
        for col in radial[m]:
            if m == 0:
                zns[row, :] = col
                row += 1
            else:
                zns[row, :] = col * cc
                row += 1
                zns[row, :] = col * ss
                row += 1
    return zns, radial
| 1,894 | 20.534091 | 105 | py |
tbsm | tbsm-main/tbsm_pytorch.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
### import packages ###
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
import os
import random
import sys  # needed: sys.exit() is called in TSL_Net.forward and get_tbsm
import time
from os import path
# numpy and scikit-learn
import numpy as np
from sklearn.metrics import roc_auc_score
# pytorch
import torch
import torch.nn as nn
import torch.nn.functional as Functional
from torch.nn.parameter import Parameter
from torch.utils.tensorboard import SummaryWriter
# tbsm data
import tbsm_data_pytorch as tp
# set python, numpy and torch random seeds
def set_seed(seed, use_gpu):
    """Seed the python, numpy and (when use_gpu) torch/CUDA RNGs for
    reproducible runs; also pins cuDNN to deterministic kernels."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    if not use_gpu:
        return
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers multi-GPU setups
    # deterministic cuDNN at the cost of autotuned kernel selection
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
### define time series layer (TSL) ###
class TSL_Net(nn.Module):
    """Time series layer: summarizes a history of item vectors H against the
    latest item vector x, producing a context vector.

    Variants (selected by model_type):
    * "tsl": learned matrix A ("def" applies A^T A, a PSD bilinear form;
      "ind" applies A directly), then dot-product attention realized either
      multiplicatively ('mul') or via an MLP over the scores ('mlp');
    * "mha": learned Q/K/V projections feeding torch multi-head attention.

    NOTE(review): relies on a module-level `dlrm` that is not imported in
    this file's visible import block -- confirm it is provided at runtime.
    """
    def __init__(
        self,
        arch_interaction_op='dot',
        arch_attention_mechanism='mlp',
        ln=None,
        model_type="tsl",
        tsl_inner="def",
        mha_num_heads=8,
        ln_top=""
    ):
        super(TSL_Net, self).__init__()
        # save arguments
        self.arch_interaction_op = arch_interaction_op
        self.arch_attention_mechanism = arch_attention_mechanism
        self.model_type = model_type
        self.tsl_inner = tsl_inner
        # setup for mechanism type
        if self.arch_attention_mechanism == 'mlp':
            # MLP mapping the vector of attention scores to mixing weights;
            # its first/last dims equal the time-series length (set in ln)
            self.mlp = dlrm.DLRM_Net().create_mlp(ln, len(ln) - 2)
        # setup extra parameters for some of the models
        if self.model_type == "tsl" and self.tsl_inner in ["def", "ind"]:
            m = ln_top[-1]  # dim of dlrm output
            mean = 0.0
            std_dev = np.sqrt(2 / (m + m))  # Xavier-style scale
            W = np.random.normal(mean, std_dev, size=(1, m, m)).astype(np.float32)
            self.A = Parameter(torch.tensor(W), requires_grad=True)
        elif self.model_type == "mha":
            m = ln_top[-1]  # dlrm output dim
            self.nheads = mha_num_heads
            self.emb_m = self.nheads * m  # mha emb dim
            mean = 0.0
            std_dev = np.sqrt(2 / (m + m))  # np.sqrt(1 / m) # np.sqrt(1 / n)
            qm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                .astype(np.float32)
            self.Q = Parameter(torch.tensor(qm), requires_grad=True)
            km = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                .astype(np.float32)
            self.K = Parameter(torch.tensor(km), requires_grad=True)
            vm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                .astype(np.float32)
            self.V = Parameter(torch.tensor(vm), requires_grad=True)

    def forward(self, x=None, H=None):
        """Return the context vector for last-item x (batch, dim) against
        history H (batch, ts_length, dim)."""
        # adjust input shape: (batch, dim) -> (batch, dim, 1) column vector
        (batchSize, vector_dim) = x.shape
        x = torch.reshape(x, (batchSize, 1, -1))
        x = torch.transpose(x, 1, 2)
        # debug prints
        # print("shapes: ", self.A.shape, x.shape)
        # perform mode operation
        if self.model_type == "tsl":
            if self.tsl_inner == "def":
                # x <- A^T A x : positive semi-definite inner product
                ax = torch.matmul(self.A, x)
                x = torch.matmul(self.A.permute(0, 2, 1), ax)
                # debug prints
                # print("shapes: ", H.shape, ax.shape, x.shape)
            elif self.tsl_inner == "ind":
                x = torch.matmul(self.A, x)
            # perform interaction operation
            if self.arch_interaction_op == 'dot':
                if self.arch_attention_mechanism == 'mul':
                    # coefficients: raw dot-product scores against history
                    a = torch.transpose(torch.bmm(H, x), 1, 2)
                    # context
                    c = torch.bmm(a, H)
                elif self.arch_attention_mechanism == 'mlp':
                    # coefficients
                    a = torch.transpose(torch.bmm(H, x), 1, 2)
                    # MLP first/last layer dims are automatically adjusted to ts_length
                    y = dlrm.DLRM_Net().apply_mlp(a, self.mlp)
                    # context, y = mlp(a)
                    c = torch.bmm(torch.reshape(y, (batchSize, 1, -1)), H)
                else:
                    sys.exit('ERROR: --arch-attention-mechanism='
                             + self.arch_attention_mechanism + ' is not supported')
            else:
                sys.exit('ERROR: --arch-interaction-op=' + self.arch_interaction_op
                         + ' is not supported')
        elif self.model_type == "mha":
            x = torch.transpose(x, 1, 2)
            Qx = torch.transpose(torch.matmul(x, self.Q), 0, 1)
            HK = torch.transpose(torch.matmul(H, self.K), 0, 1)
            HV = torch.transpose(torch.matmul(H, self.V), 0, 1)
            # multi-head attention (mha)
            # NOTE(review): a fresh MultiheadAttention is constructed on every
            # forward call, so its internal projection weights are random and
            # never trained -- confirm this is intentional.
            multihead_attn = nn.MultiheadAttention(self.emb_m, self.nheads).to(x.device)
            attn_output, _ = multihead_attn(Qx, HK, HV)
            # context
            c = torch.squeeze(attn_output, dim=0)
            # debug prints
            # print("shapes:", c.shape, Qx.shape)
        return c
### define Time-based Sequence Model (TBSM) ###
class TBSM_Net(nn.Module):
    """Time-based Sequence Model.

    Embeds each time step with a DLRM tower, summarizes the history of
    ts_length embeddings against the last item's embedding via one or more
    TSL components (or multi-head attention, or an LSTM), and produces a
    click probability through one or more sigmoid-terminated MLPs.

    NOTE(review): this class reads the module-level `args` (args.run_fast)
    and the un-imported `dlrm` module -- both must exist at runtime.
    """
    def __init__(
        self,
        m_spa,
        ln_emb,
        ln_bot,
        ln_top,
        arch_interaction_op,
        arch_interaction_itself,
        ln_mlp,
        ln_tsl,
        tsl_interaction_op,
        tsl_mechanism,
        ts_length,
        ndevices=-1,
        model_type="",
        tsl_seq=False,
        tsl_proj=True,
        tsl_inner="def",
        tsl_num_heads=1,
        mha_num_heads=8,
        rnn_num_layers=5,
        debug_mode=False,
    ):
        super(TBSM_Net, self).__init__()
        # save arguments
        self.ndevices = ndevices
        self.debug_mode = debug_mode
        self.ln_bot = ln_bot
        self.ln_top = ln_top
        self.ln_tsl = ln_tsl
        self.ts_length = ts_length
        self.tsl_interaction_op = tsl_interaction_op
        self.tsl_mechanism = tsl_mechanism
        self.model_type = model_type
        self.tsl_seq = tsl_seq
        self.tsl_proj = tsl_proj
        self.tsl_inner = tsl_inner
        self.tsl_num_heads = tsl_num_heads
        self.mha_num_heads = mha_num_heads
        self.rnn_num_layers = rnn_num_layers
        self.ams = nn.ModuleList()
        self.mlps = nn.ModuleList()
        if self.model_type == "tsl":
            self.num_mlps = int(self.tsl_num_heads) # number of tsl components
        else:
            self.num_mlps = 1
        #debug prints
        if self.debug_mode:
            print(self.model_type)
            print(ln_bot)
            print(ln_top)
            print(ln_emb)
        # embedding layer (implemented through dlrm tower, without last layer sigmoid)
        if "qr" in model_type:
            self.dlrm = dlrm.DLRM_Net(
                m_spa, ln_emb, ln_bot, ln_top,
                arch_interaction_op, arch_interaction_itself,
                qr_flag=True, qr_operation="add", qr_collisions=4, qr_threshold=100000
            )
            print("Using QR embedding method.")
        else:
            self.dlrm = dlrm.DLRM_Net(
                m_spa, ln_emb, ln_bot, ln_top,
                arch_interaction_op, arch_interaction_itself,
            )
        # prepare data needed for tsl layer construction
        if self.model_type == "tsl":
            if not self.tsl_seq:
                # every head sees the full history
                self.ts_array = [self.ts_length] * self.num_mlps
            else:
                # heads see progressively shorter (most-recent) history slices
                self.ts_array = []
                m = self.ts_length / self.tsl_num_heads
                for j in range(self.tsl_num_heads, 0, -1):
                    t = min(self.ts_length, round(m * j))
                    self.ts_array.append(t)
        elif self.model_type == "mha":
            self.ts_array = [self.ts_length]
        else:
            self.ts_array = []
        # construction of one or more tsl components
        for ts in self.ts_array:
            # attention MLP first/last layer dims adjusted to this head's length
            ln_tsl = np.concatenate((np.array([ts]), self.ln_tsl))
            ln_tsl = np.append(ln_tsl, ts)
            # create tsl mechanism
            am = TSL_Net(
                arch_interaction_op=self.tsl_interaction_op,
                arch_attention_mechanism=self.tsl_mechanism,
                ln=ln_tsl, model_type=self.model_type,
                tsl_inner=self.tsl_inner,
                mha_num_heads=self.mha_num_heads, ln_top=self.ln_top,
            )
            self.ams.append(am)
        # tsl MLPs (with sigmoid on last layer)
        for _ in range(self.num_mlps):
            mlp_tsl = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)
            self.mlps.append(mlp_tsl)
        # top mlp if needed (combines the per-head probabilities)
        if self.num_mlps > 1:
            f_mlp = np.array([self.num_mlps, self.num_mlps + 4, 1])
            self.final_mlp = dlrm.DLRM_Net().create_mlp(f_mlp, f_mlp.size - 2)
        # Offsets need to be stored beforehand if args.run_fast.
        if args.run_fast:
            # Constant offsets tensor - resize if needed.
            self.max_offset = 10000000
            self.offsets = torch.tensor(list(range(self.max_offset)))
            self.offsets_moved = False

    def forward(self, x, lS_o, lS_i):
        """Compute click probabilities for a batch.

        x, lS_o, lS_i are per-time-step lists of dense inputs, embedding
        offsets and embedding indices; the last entry is the candidate
        item, the preceding ts_length entries form the history.
        """
        # Move offsets to device if needed and not already done.
        if args.run_fast and not self.offsets_moved:
            self.offsets = self.offsets.to(x[0].device)
            self.offsets_moved = True
        # data point is history H and last entry w
        n = x[0].shape[0]  # batch_size
        ts = len(x)
        H = torch.zeros(n, self.ts_length, self.ln_top[-1]).to(x[0].device)
        # Compute H using either fast or original approach depending on args.run_fast.
        if args.run_fast:
            # j determines access indices of input; first, determine j bounds and get all inputs.
            j_lower = (ts - self.ts_length - 1)
            j_upper = (ts - 1)
            # Concatenate x[j]s using j bounds.
            concatenated_x = torch.cat(x[j_lower : j_upper])
            # Set offsets and increase size if needed.
            curr_max_offset = (j_upper - j_lower) * n
            if curr_max_offset > self.max_offset + 1:
                # Resize offsets to 2x required size.
                self.offsets = torch.tensor(list(range(curr_max_offset * 2))).to(self.offsets.device)
                self.max_offset = curr_max_offset * 2
            concatenated_lS_o = [self.offsets[: curr_max_offset] for j in range(len(lS_o[0]))]
            # Concatenate lS_i[0, 1, 2]s.
            concatenated_lS_i = [torch.cat([lS_i[i][j] for i in range(j_lower, j_upper)]) for j in range(len(lS_i[0]))]
            # oj determines access indices of output; determine oj bounds to assign output values in H. oj is just j indices adjusted to start at 0.
            oj_lower = 0 - (ts - self.ts_length - 1)
            oj_upper = (ts - 1) - (ts - self.ts_length - 1)
            # After fetching all inputs, run through DLRM.
            concatenated_dlrm_output = self.dlrm(concatenated_x, concatenated_lS_o, concatenated_lS_i)
            # Reshape output with new TS dimension and transpose to get H output.
            transposed_concatenated_dlrm_output = torch.transpose(concatenated_dlrm_output.reshape((j_upper - j_lower), n, self.ln_top[-1]), 0, 1)
            if self.model_type == "tsl" and self.tsl_proj:
                dlrm_output = Functional.normalize(transposed_concatenated_dlrm_output, p=2, dim=2)
            else:
                dlrm_output = transposed_concatenated_dlrm_output
            # Assign the output to H with correct output bounds.
            H[:, oj_lower : oj_upper, :] = dlrm_output
        else:
            # split point into first part (history)
            # and last item
            for j in range(ts - self.ts_length - 1, ts - 1):
                oj = j - (ts - self.ts_length - 1)
                v = self.dlrm(x[j], lS_o[j], lS_i[j])
                if self.model_type == "tsl" and self.tsl_proj:
                    v = Functional.normalize(v, p=2, dim=1)
                H[:, oj, :] = v
        w = self.dlrm(x[-1], lS_o[-1], lS_i[-1])
        # project onto sphere
        if self.model_type == "tsl" and self.tsl_proj:
            w = Functional.normalize(w, p=2, dim=1)
        # print("data: ", x[-1], lS_o[-1], lS_i[-1])
        (mini_batch_size, _) = w.shape
        # for cases when model is tsl or mha
        if self.model_type != "rnn":
            # create MLP for each TSL component
            # each ams[] element is one component
            for j in range(self.num_mlps):
                ts = self.ts_length - self.ts_array[j]
                c = self.ams[j](w, H[:, ts:, :])
                c = torch.reshape(c, (mini_batch_size, -1))
                # concat context and w
                z = torch.cat([c, w], dim=1)
                # obtain probability of a click as a result of MLP
                p = dlrm.DLRM_Net().apply_mlp(z, self.mlps[j])
                if j == 0:
                    ps = p
                else:
                    ps = torch.cat((ps, p), dim=1)
            if ps.shape[1] > 1:
                p_out = dlrm.DLRM_Net().apply_mlp(ps, self.final_mlp)
            else:
                p_out = ps
        # RNN based on LSTM cells case, context is final hidden state
        else:
            hidden_dim = w.shape[1]  # equal to dim(w) = dim(c)
            level = self.rnn_num_layers  # num stacks of rnns
            Ht = H.permute(1, 0, 2)
            # NOTE(review): a fresh LSTM (random, untrained weights) is built
            # on every forward call -- confirm this is intentional.
            rnn = nn.LSTM(int(self.ln_top[-1]), int(hidden_dim),
                          int(level)).to(x[0].device)
            h0 = torch.randn(level, n, hidden_dim).to(x[0].device)
            c0 = torch.randn(level, n, hidden_dim).to(x[0].device)
            output, (hn, cn) = rnn(Ht, (h0, c0))
            hn, cn = torch.squeeze(hn[level - 1, :, :]), \
                torch.squeeze(cn[level - 1, :, :])
            if self.debug_mode:
                print(w.shape, output.shape, hn.shape)
            # concat context and w
            z = torch.cat([hn, w], dim=1)
            p_out = dlrm.DLRM_Net().apply_mlp(z, self.mlps[0])
        return p_out
# construct tbsm model or read it from the file specified
# by args.save_model
def get_tbsm(args, use_gpu):
    """Construct a TBSM_Net from parsed command-line *args* (or load a
    pre-trained one from args.save_model when testing).

    Returns
    -------
    (tbsm, device) : the model (moved to GPU when use_gpu) and the torch
        device it lives on.
    """
    # train, test, or train-test
    modes = args.mode.split("-")
    model_file = args.save_model
    if args.debug_mode:
        print("model_file: ", model_file)
        print("model_type: ", args.model_type)
    if use_gpu:
        ngpus = torch.cuda.device_count()  # 1
        devicenum = "cuda:" + str(args.device_num % ngpus)
        print("device:", devicenum)
        device = torch.device(devicenum)
        print("Using {} GPU(s)...".format(ngpus))
    else:
        device = torch.device("cpu")
        print("Using CPU...")
    # prepare dlrm arch
    m_spa = args.arch_sparse_feature_size
    # this is an array of sizes of cat features
    # NOTE(review): np.fromstring on text is deprecated in favor of
    # np.fromstring -> np.fromiter/np.array; works here but warns on new numpy.
    ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
    num_fea = ln_emb.size + 1  # user: num sparse + bot_mlp(all dense)
    ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
    # m_den = ln_bot[0]
    ln_bot[ln_bot.size - 1] = m_spa  # enforcing
    m_den_out = ln_bot[ln_bot.size - 1]  # must be == m_spa (embed dim)
    if args.arch_interaction_op == "dot":
        # approach 1: all
        # num_int = num_fea * num_fea + m_den_out
        # approach 2: unique
        if args.arch_interaction_itself:
            num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
        else:
            num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
    elif args.arch_interaction_op == "cat":
        num_int = num_fea * m_den_out
    else:
        sys.exit(
            "ERROR: --arch-interaction-op="
            + args.arch_interaction_op
            + " is not supported"
        )
    # prepend the interaction width as the top MLP's input layer
    arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
    ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
    # sigmoid_top = len(ln_top) - 2 # used only if length_ts == 1
    # attention mlp (will be automatically adjusted so that first and last
    # layer correspond to number of vectors (ts_length) used in attention)
    ln_atn = np.fromstring(args.tsl_mlp, dtype=int, sep="-")
    # context MLP (with automatically adjusted first layer)
    if args.model_type == "mha":
        num_cat = (int(args.mha_num_heads) + 1) * ln_top[-1]  # mha with k heads + w
    else:  # tsl or rnn
        num_cat = 2 * ln_top[-1]  # [c,w]
    arch_mlp_adjusted = str(num_cat) + "-" + args.arch_mlp
    ln_mlp = np.fromstring(arch_mlp_adjusted, dtype=int, sep="-")
    # construct TBSM
    tbsm = TBSM_Net(
        m_spa,
        ln_emb,
        ln_bot,
        ln_top,
        args.arch_interaction_op,
        args.arch_interaction_itself,
        ln_mlp,
        ln_atn,
        args.tsl_interaction_op,
        args.tsl_mechanism,
        args.ts_length,
        -1,
        args.model_type,
        args.tsl_seq,
        args.tsl_proj,
        args.tsl_inner,
        args.tsl_num_heads,
        args.mha_num_heads,
        args.rnn_num_layers,
        args.debug_mode,
    )
    # move model to gpu
    if use_gpu:
        tbsm = tbsm.to(device)  # .cuda()
    # load existing pre-trained model if needed
    if path.exists(model_file):
        if modes[0] == "test" or (len(modes) > 1 and modes[1] == "test"):
            if use_gpu:
                ld_model = torch.load(
                    model_file,
                    map_location=torch.device('cuda')
                )
            else:
                # when targeting inference on CPU
                ld_model = torch.load(model_file, map_location=torch.device('cpu'))
            tbsm.load_state_dict(ld_model['model_state_dict'])
    return tbsm, device
def data_wrap(X, lS_o, lS_i, use_gpu, device):
    """Move a (dense, offsets, indices) batch onto *device* when running on
    GPU; on CPU the inputs are returned untouched."""
    if not use_gpu:
        return X, lS_o, lS_i
    dense = [xj.to(device) for xj in X]
    offsets = [[o.to(device) for o in row] for row in lS_o]
    indices = [[idx.to(device) for idx in row] for row in lS_i]
    return dense, offsets, indices
def time_wrap(use_gpu, device):
    """Return time.time(); when on GPU, synchronize CUDA first so the
    timestamp reflects completed kernels."""
    if use_gpu:
        # wait for queued CUDA work before reading the clock
        torch.cuda.synchronize(device)
    now = time.time()
    return now
def loss_fn_wrap(Z, T, use_gpu, device):
    """Evaluate the module-level BCE loss, moving the targets onto *device*
    when running on the GPU."""
    target = T.to(device) if use_gpu else T
    return loss_fn(Z, target)
loss_fn = torch.nn.BCELoss(reduction="mean")
# iterate through validation data, which can be used to determine the best seed and
# during main training for deciding to save the current model
def iterate_val_data(val_ld, tbsm, use_gpu, device):
    """One full pass over the validation loader.

    Returns (correct_count, sample_count, summed_loss), where summed_loss is
    the per-batch mean loss weighted by batch size.
    NOTE: no tbsm.eval() needed here, see
    https://discuss.pytorch.org/t/model-eval-vs-with-torch-no-grad/19615
    """
    n_correct = 0
    n_samples = 0
    loss_sum = 0
    for X, lS_o, lS_i, T_val in val_ld:
        bs = X[0].shape[0]
        Z_val = tbsm(*data_wrap(X, lS_o, lS_i, use_gpu, device))
        # accuracy: rounded predictions vs targets
        preds = Z_val.detach().cpu().numpy()
        truth = T_val.detach().cpu().numpy()
        n_correct += np.sum((np.round(preds, 0) == truth).astype(np.uint8))
        n_samples += bs
        # loss, weighted back up by batch size
        batch_loss = loss_fn_wrap(Z_val, T_val, use_gpu, device).detach().cpu().numpy()
        loss_sum += batch_loss * bs
    return n_correct, n_samples, loss_sum
# iterate through training data, which is called once every epoch. It updates weights,
# computes loss, accuracy, saves model if needed and calls iterate_val_data() function.
# isMainTraining is True for main training and False for fast seed selection
def iterate_train_data(args, train_ld, val_ld, tbsm, k, use_gpu, device, writer, losses, accuracies, isMainTraining):
    """Run one epoch (at most `nbatches` batches) of training.

    Performs forward/backward/step per batch, prints running loss/accuracy,
    periodically validates via iterate_val_data() and, during main training,
    saves the model whenever validation accuracy improves within this epoch.

    Args:
        args: parsed CLI namespace (num_batches, learning_rate, print_freq,
            test_freq, enable_summary, save_model, ...).
        train_ld / val_ld: training and validation DataLoaders.
        tbsm: the model being trained; k: epoch index (logging only).
        use_gpu, device: execution target.
        writer: TensorBoard SummaryWriter used when args.enable_summary.
        losses, accuracies: numpy arrays for summary rows.
            NOTE(review): np.append below rebinds the *local* names only, so
            the caller's arrays are never extended — confirm this is intended.
        isMainTraining: False during fast seed selection; in that mode the
            loop stops at the first validation point and gA_test is returned.

    Returns:
        Validation accuracy (float) when isMainTraining is False, else None.
    """
    # select number of batches
    if isMainTraining:
        nbatches = len(train_ld) if args.num_batches == 0 else args.num_batches
    else:
        nbatches = len(train_ld)
    # specify the optimizer algorithm
    # NOTE(review): a fresh Adagrad (with empty accumulator state) is created
    # on every call, i.e. optimizer state does not persist across epochs.
    optimizer = torch.optim.Adagrad(tbsm.parameters(), lr=args.learning_rate)
    total_time = 0
    total_loss = 0
    total_accu = 0
    total_iter = 0
    total_samp = 0
    max_gA_test = 0  # best validation accuracy seen within this call
    for j, (X, lS_o, lS_i, T) in enumerate(train_ld):
        if j >= nbatches:
            break
        t1 = time_wrap(use_gpu, device)
        batchSize = X[0].shape[0]
        # forward pass
        Z = tbsm(*data_wrap(X,
                            lS_o,
                            lS_i,
                            use_gpu,
                            device
                            ))
        # loss
        E = loss_fn_wrap(Z, T, use_gpu, device)
        # compute loss and accuracy
        L = E.detach().cpu().numpy()  # numpy array
        z = Z.detach().cpu().numpy()  # numpy array
        t = T.detach().cpu().numpy()  # numpy array
        # rounding t
        A = np.sum((np.round(z, 0) == np.round(t, 0)).astype(np.uint8))
        optimizer.zero_grad()
        # backward pass
        # NOTE(review): retain_graph=True keeps the autograd graph alive after
        # the step; presumably required by graph reuse inside the model —
        # confirm, since it costs memory if not needed.
        E.backward(retain_graph=True)
        # weights update
        optimizer.step()
        t2 = time_wrap(use_gpu, device)
        total_time += t2 - t1
        total_loss += (L * batchSize)
        total_accu += A
        total_iter += 1
        total_samp += batchSize
        print_tl = ((j + 1) % args.print_freq == 0) or (j + 1 == nbatches)
        # print time, loss and accuracy; running counters reset each interval
        if print_tl and isMainTraining:
            gT = 1000.0 * total_time / total_iter if args.print_time else -1
            total_time = 0
            gL = total_loss / total_samp
            total_loss = 0
            gA = total_accu / total_samp
            total_accu = 0
            str_run_type = "inference" if args.inference_only else "training"
            print(
                "Finished {} it {}/{} of epoch {}, ".format(
                    str_run_type, j + 1, nbatches, k
                )
                + "{:.2f} ms/it, loss {:.8f}, accuracy {:3.3f} %".format(
                    gT, gL, gA * 100
                )
            )
            total_iter = 0
            total_samp = 0
        if isMainTraining:
            should_test = (
                (args.test_freq > 0
                 and (j + 1) % args.test_freq == 0) or j + 1 == nbatches
            )
        else:
            # seed selection: validate exactly once, early in the epoch
            should_test = (j == min(int(0.05 * len(train_ld)), len(train_ld) - 1))
        # validation run
        if should_test:
            total_accu_test, total_samp_test, total_loss_val = iterate_val_data(val_ld, tbsm, use_gpu, device)
            gA_test = total_accu_test / total_samp_test
            if not isMainTraining:
                break
            gL_test = total_loss_val / total_samp_test
            print("At epoch {:d} validation accuracy is {:3.3f} %".
                  format(k, gA_test * 100))
            # NOTE(review): gL/gA below come from the most recent print
            # interval; with print_freq > 1 a validation point could precede
            # the first print and raise NameError — confirm print_freq is
            # always compatible with test_freq.
            if args.enable_summary and isMainTraining:
                writer.add_scalars('train and val loss',
                                   {'train_loss': gL,
                                    'val_loss': gL_test},
                                   k * len(train_ld) + j)
                writer.add_scalars('train and val accuracy',
                                   {'train_acc': gA * 100,
                                    'val_acc': gA_test * 100},
                                   k * len(train_ld) + j)
                losses = np.append(losses, np.array([[j, gL, gL_test]]),
                                   axis=0)
                accuracies = np.append(accuracies, np.array([[j, gA * 100,
                                                              gA_test * 100]]), axis=0)
            # save model if best so far
            if gA_test > max_gA_test and isMainTraining:
                print("Saving current model...")
                max_gA_test = gA_test
                model_ = tbsm
                torch.save(
                    {
                        "model_state_dict": model_.state_dict(),
                        # "opt_state_dict": optimizer.state_dict(),
                    },
                    args.save_model,
                )
    if not isMainTraining:
        return gA_test
# selects best seed, and does main model training
def train_tbsm(args, use_gpu):
    """Full training entry point.

    Builds train/val loaders, optionally picks the best of five random seeds
    (fast seed selection), then runs the main training loop for args.nepochs
    epochs, with optional TensorBoard summary and autograd profiling output.
    """
    # prepare the data
    train_ld, _ = tp.make_tbsm_data_and_loader(args, "train")
    val_ld, _ = tp.make_tbsm_data_and_loader(args, "val")
    # setup initial values
    isMainTraining = False
    writer = SummaryWriter()
    losses = np.empty((0,3), np.float32)
    accuracies = np.empty((0,3), np.float32)

    # selects best seed out of 5. Sometimes Adagrad gets stuck early, this
    # seems to occur randomly depending on initial weight values and
    # is independent of chosen model: N-inner, dot etc.
    # this procedure is used to reduce the probability of this happening.
    def select(args):
        seeds = np.random.randint(2, 10000, size=5)
        if args.debug_mode:
            print(seeds)
        best_index = 0
        max_val_accuracy = 0.0
        testpoint = min(int(0.05 * len(train_ld)), len(train_ld) - 1)
        print("testpoint, total batches: ", testpoint, len(train_ld))
        for i, seed in enumerate(seeds):
            set_seed(seed, use_gpu)
            tbsm, device = get_tbsm(args, use_gpu)
            # isMainTraining is False here, so iterate_train_data returns the
            # validation accuracy reached at the early testpoint and stops
            gA_test = iterate_train_data(args, train_ld, val_ld, tbsm, 0, use_gpu,
                                         device, writer, losses, accuracies,
                                         isMainTraining)
            if args.debug_mode:
                print("select: ", i, seed, gA_test, max_val_accuracy)
            if gA_test > max_val_accuracy:
                best_index = i
                max_val_accuracy = gA_test
        return seeds[best_index]

    # select best seed if needed (skipped when resuming from a saved model)
    if args.no_select_seed or path.exists(args.save_model):
        seed = args.numpy_rand_seed
    else:
        print("Choosing best seed...")
        seed = select(args)
    set_seed(seed, use_gpu)
    print("selected seed:", seed)
    # create or load TBSM
    tbsm, device = get_tbsm(args, use_gpu)
    if args.debug_mode:
        print("initial parameters (weights and bias):")
        for name, param in tbsm.named_parameters():
            print(name)
            print(param.detach().cpu().numpy())
    # main training loop
    isMainTraining = True
    print("time/loss/accuracy (if enabled):")
    with torch.autograd.profiler.profile(args.enable_profiling, use_gpu) as prof:
        for k in range(args.nepochs):
            iterate_train_data(args, train_ld, val_ld, tbsm, k, use_gpu, device,
                               writer, losses, accuracies, isMainTraining)
    # collect metrics and other statistics about the run
    # NOTE(review): iterate_train_data does not mutate losses/accuracies in
    # place (np.append copies), so the arrays saved here stay empty — confirm
    # this is intended.
    if args.enable_summary:
        with open('summary.npy', 'wb') as acc_loss:
            np.save(acc_loss, losses)
            np.save(acc_loss, accuracies)
        writer.close()
    # debug prints
    if args.debug_mode:
        print("final parameters (weights and bias):")
        for name, param in tbsm.named_parameters():
            print(name)
            print(param.detach().cpu().numpy())
    # profiling
    if args.enable_profiling:
        with open("tbsm_pytorch.prof", "w") as prof_f:
            prof_f.write(
                prof.key_averages(group_by_input_shape=True).table(
                    sort_by="self_cpu_time_total"
                )
            )
        prof.export_chrome_trace("./tbsm_pytorch.json")
    return
# evaluates model on test data and computes AUC metric
def test_tbsm(args, use_gpu):
    """Evaluate a saved TBSM model on the test set and report its quality.

    Loads the checkpoint from args.save_model (exits if it does not exist),
    runs inference over the whole test loader collecting predictions and
    labels into flat arrays, and prints the roc-auc score (the only metric
    currently supported via args.quality_metric == "auc").
    """
    # prepare data
    test_ld, N_test = tp.make_tbsm_data_and_loader(args, "test")
    # setup initial values
    # fix: np.float was removed in NumPy 1.24 — use the builtin float
    # (identical dtype: float64)
    z_test = np.zeros((N_test, ), dtype=float)
    t_test = np.zeros((N_test, ), dtype=float)
    # check saved model exists
    if not path.exists(args.save_model):
        sys.exit("Can't find saved model. Exiting...")
    # create or load TBSM
    tbsm, device = get_tbsm(args, use_gpu)
    print(args.save_model)
    # main eval loop
    # NOTE: call to tbsm.eval() not needed here, see
    # https://discuss.pytorch.org/t/model-eval-vs-with-torch-no-grad/19615
    offset = 0
    for _, (X, lS_o, lS_i, T) in enumerate(test_ld):
        batchSize = X[0].shape[0]
        Z = tbsm(*data_wrap(X,
                            lS_o,
                            lS_i,
                            use_gpu,
                            device
                            ))
        # gather per-batch predictions/labels into the flat result arrays
        z_test[offset: offset + batchSize] = np.squeeze(Z.detach().cpu().numpy(),
                                                        axis=1)
        t_test[offset: offset + batchSize] = np.squeeze(T.detach().cpu().numpy(),
                                                        axis=1)
        offset += batchSize
    if args.quality_metric == "auc":
        # compute AUC metric
        auc_score = 100.0 * roc_auc_score(t_test.astype(int), z_test)
        print("auc score: ", auc_score)
    else:
        sys.exit("Metric not supported.")
if __name__ == "__main__":
    ### import packages ###
    import sys
    import argparse

    ### parse arguments ###
    parser = argparse.ArgumentParser(description="Time Based Sequence Model (TBSM)")
    # path to dlrm
    parser.add_argument("--dlrm-path", type=str, default="")
    # data type: taobao or synthetic (generic)
    parser.add_argument("--datatype", type=str, default="synthetic")
    # mode: train or inference or both
    parser.add_argument("--mode", type=str, default="train")  # train, test, train-test
    # data locations
    parser.add_argument("--raw-train-file", type=str, default="./input/train.txt")
    parser.add_argument("--pro-train-file", type=str, default="./output/train.npz")
    parser.add_argument("--raw-test-file", type=str, default="./input/test.txt")
    parser.add_argument("--pro-test-file", type=str, default="./output/test.npz")
    parser.add_argument("--pro-val-file", type=str, default="./output/val.npz")
    parser.add_argument("--num-train-pts", type=int, default=100)
    parser.add_argument("--num-val-pts", type=int, default=20)
    # time series length for train/val and test
    parser.add_argument("--ts-length", type=int, default=20)
    # model_type = "tsl", "mha", "rnn"
    parser.add_argument("--model-type", type=str, default="tsl")  # tsl, mha, rnn
    parser.add_argument("--tsl-seq", action="store_true", default=False)  # k-seq method
    parser.add_argument("--tsl-proj", action="store_true", default=True)  # sphere proj
    parser.add_argument("--tsl-inner", type=str, default="def")  # ind, def, dot
    parser.add_argument("--tsl-num-heads", type=int, default=1)  # num tsl components
    parser.add_argument("--mha-num-heads", type=int, default=8)  # num mha heads
    parser.add_argument("--rnn-num-layers", type=int, default=5)  # num rnn layers
    # num positive (and negative) points per user
    parser.add_argument("--points-per-user", type=int, default=10)
    # model arch related parameters
    # embedding dim for all sparse features (same for all features)
    parser.add_argument("--arch-sparse-feature-size", type=int, default=4)  # emb_dim
    # number of distinct values for each sparse feature
    parser.add_argument("--arch-embedding-size", type=str, default="4-3-2")  # vectors
    # for taobao use "987994-4162024-9439")
    # MLP 1: num dense fea --> embedding dim for sparse fea (out_dim enforced)
    parser.add_argument("--arch-mlp-bot", type=str, default="1-4")
    # MLP 2: num_interactions + bot[-1] --> top[-1]
    # (in_dim adjusted, out_dim can be any)
    parser.add_argument("--arch-mlp-top", type=str, default="2-2")
    # MLP 3: attention: ts_length --> ts_length (both adjusted)
    parser.add_argument("--tsl-mlp", type=str, default="2-2")
    # MLP 4: final prob. of click: 2 * top[-1] --> [0,1] (in_dim adjusted)
    parser.add_argument("--arch-mlp", type=str, default="4-1")
    # interactions
    parser.add_argument("--arch-interaction-op", type=str, default="dot")
    parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
    parser.add_argument("--tsl-interaction-op", type=str, default="dot")
    parser.add_argument("--tsl-mechanism", type=str, default="mlp")  # mul or MLP
    # data
    parser.add_argument("--num-batches", type=int, default=0)
    # training
    parser.add_argument("--mini-batch-size", type=int, default=1)
    parser.add_argument("--nepochs", type=int, default=1)
    parser.add_argument("--learning-rate", type=float, default=0.05)
    parser.add_argument("--print-precision", type=int, default=5)
    parser.add_argument("--numpy-rand-seed", type=int, default=123)
    parser.add_argument("--no-select-seed", action="store_true", default=False)
    # inference
    parser.add_argument("--quality-metric", type=str, default="auc")
    parser.add_argument("--test-freq", type=int, default=0)
    # NOTE(review): argparse's type=bool treats ANY non-empty string
    # (including "False") as True — presumably only the default is ever
    # relied on; consider action="store_true" if passed explicitly.
    parser.add_argument("--inference-only", type=bool, default=False)
    # saving model
    parser.add_argument("--save-model", type=str, default="./output/model.pt")
    # gpu
    parser.add_argument("--use-gpu", action="store_true", default=False)
    parser.add_argument("--device-num", type=int, default=0)
    # debugging and profiling
    parser.add_argument("--debug-mode", action="store_true", default=False)
    parser.add_argument("--print-freq", type=int, default=1)
    parser.add_argument("--print-time", action="store_true", default=False)
    parser.add_argument("--enable-summary", action="store_true", default=False)
    parser.add_argument("--enable-profiling", action="store_true", default=False)
    parser.add_argument("--run-fast", action="store_true", default=False)
    args = parser.parse_args()

    # the code requires access to dlrm model
    if not path.exists(str(args.dlrm_path)):
        sys.exit("Please provide path to DLRM as --dlrm-path")
    sys.path.insert(1, args.dlrm_path)
    import dlrm_s_pytorch as dlrm

    if args.datatype == "taobao" and args.arch_embedding_size != "987994-4162024-9439":
        sys.exit(
            "ERROR: arch-embedding-size for taobao "
            + " needs to be 987994-4162024-9439"
        )
    if args.tsl_inner not in ["def", "ind"] and int(args.tsl_num_heads) > 1:
        sys.exit(
            "ERROR: dot product "
            + " assumes one tsl component (due to redundancy)"
        )
    # model_type = "tsl", "mha", "rnn"
    print("dlrm path: ", args.dlrm_path)
    print("model_type: ", args.model_type)
    print("time series length: ", args.ts_length)
    print("seed: ", args.numpy_rand_seed)
    print("model_file:", args.save_model)
    ### some basic setup ###
    use_gpu = args.use_gpu and torch.cuda.is_available()
    set_seed(args.numpy_rand_seed, use_gpu)
    np.set_printoptions(precision=args.print_precision)
    torch.set_printoptions(precision=args.print_precision)
    print("use-gpu:", use_gpu)
    # possible modes:
    # "train-test" for both training and metric computation on test data,
    # "train" for training model
    # "test" for metric computation on test data using saved trained model
    modes = args.mode.split("-")
    if modes[0] == "train":
        train_tbsm(args, use_gpu)
    if modes[0] == "test" or (len(modes) > 1 and modes[1] == "test"):
        test_tbsm(args, use_gpu)
| 35,338 | 36.917382 | 148 | py |
tbsm | tbsm-main/tbsm_synthetic.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# miscellaneous
from os import path
import sys
# numpy and scikit-learn
import numpy as np
from sklearn.metrics import roc_auc_score
# pytorch
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
# In synthetic experiment we generate the output vectors z of the embedding
# layer directly, therefore we create a custom TBSM, rather than instantiate
# an existing general model.
# Synthetic experiment code
# It generates time series data in D dimensions
# with the property that binary label has some dependency
# on coupling between time series components in pairs of dimensions.
def synthetic_experiment():
    """Synthetic-data study of TBSM attention variants.

    Generates D-dimensional time series whose binary label depends on the
    coupling between pairs of dimensions (controlled by K random-permutation
    terms), then trains and AUC-scores five small models per K: dot product,
    "def" inner products with 1/4/8 components, and multi-head attention.

    Fix vs original: np.int / np.float (aliases removed in NumPy 1.24) are
    replaced by the builtin int / float; behavior is otherwise unchanged.
    """
    N, Nt, D, T = 50000, 5000, 5, 10
    auc_results = np.empty((0, 5), np.float32)

    def generate_data(N, high):
        # H: per-sample history (T x D), w: per-sample target vector (1 x D)
        H = np.random.uniform(low=-1.0, high=1.0, size=N * D * T).reshape(N, T, D)
        w = np.random.uniform(low=-1.0, high=1.0, size=N * D).reshape(N, 1, D)
        return H, w

    for K in range(0, 31, 10):
        print("num q terms: ", K)
        # ----- train set ------
        H, w = generate_data(N, 1.0)
        wt = np.transpose(w, (0, 2, 1))
        # fix: np.int removed in NumPy 1.24
        p = np.zeros(D * K, dtype=int).reshape(K, D)
        for j in range(K):
            p[j, :] = np.random.permutation(D)
            wt2 = wt[:, p[j], :]
            wt = wt + wt2
        Q = np.matmul(H[:, :, :], wt[:, :, :])  # similarity coefs
        Q = np.squeeze(Q, axis=2)
        R = np.mean(Q, axis=1)
        R = np.sign(R)
        # s1 = np.count_nonzero(R > 0)
        # print(Q.shape)
        # print("num pos, total: ", s1, N)
        R = R + 1
        t_train = R.reshape(N, 1)
        z_train = np.concatenate((H, w), axis=1)
        # ----- test set ------
        H, w = generate_data(Nt, 1.0)
        wt = np.transpose(w, (0, 2, 1))
        for j in range(K):
            wt2 = wt[:, p[j], :]
            wt = wt + wt2
        Q = np.matmul(H[:, :, :], wt[:, :, :])  # dot product
        Q = np.squeeze(Q, axis=2)
        R = np.mean(Q, axis=1)
        R = np.sign(R) + 1
        t_test = R.reshape(Nt, 1)
        z_test = np.concatenate((H, w), axis=1)
        # debug prints
        # print(z_train.shape, t_train.shape)

        # thin indexable wrapper so DataLoader can serve (features, label)
        class SyntheticDataset:
            def __init__(self, F, y):
                self.F = F
                self.y = y

            def __getitem__(self, index):
                if isinstance(index, slice):
                    return [
                        self[idx] for idx in range(
                            index.start or 0, index.stop or len(self), index.step or 1
                        )
                    ]
                return self.F[index], self.y[index]

            def __len__(self):
                return len(self.y)

        ztraind = SyntheticDataset(z_train, t_train)
        ztestd = SyntheticDataset(z_test, t_test)

        def collate_zfn(list_of_tuples):
            data = list(zip(*list_of_tuples))
            F = torch.tensor(data[0], dtype=torch.float)
            y = torch.tensor(data[1], dtype=torch.float)
            # y = torch.unsqueeze(y, 1)
            return F, y

        ztrain_ld = torch.utils.data.DataLoader(
            ztraind,
            batch_size=128,
            num_workers=0,
            collate_fn=collate_zfn,
            shuffle=True
        )
        ztest_ld = torch.utils.data.DataLoader(
            ztestd,
            batch_size=Nt,
            num_workers=0,
            collate_fn=collate_zfn,
        )

        ### define TBSM in PyTorch ###
        class TBSM_SubNet(nn.Module):
            def __init__(
                self,
                mode,
                num_inner,
                D,
                T,
            ):
                super(TBSM_SubNet, self).__init__()
                self.mode = mode
                self.num_inner = num_inner
                if self.mode in ["def", "ind", "dot"]:
                    if self.mode in ["def", "ind"]:
                        # learnable context matrices, init near identity
                        self.A = []
                        mean = 0.0
                        std_dev = np.sqrt(2 / (D + D))
                        for _ in range(self.num_inner):
                            E = np.eye(D, dtype=np.float32)
                            W = np.random.normal(mean, std_dev, size=(1, D, D)) \
                                .astype(np.float32)
                            self.A.append(Parameter(torch.tensor(E + W),
                                                    requires_grad=True))
                    d = self.num_inner * T
                    # d = self.num_inner * D + D
                    ln_mlp = np.array([d, 2 * d, 1])
                    self.mlp = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)
                elif self.mode == "mha":
                    m = D  # dim
                    self.nheads = 8
                    self.emb_m = self.nheads * m  # mha emb dim
                    mean = 0.0
                    std_dev = np.sqrt(2 / (m + m))  # np.sqrt(1 / m) # np.sqrt(1 / n)
                    qm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                        .astype(np.float32)
                    self.Q = Parameter(torch.tensor(qm), requires_grad=True)
                    km = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                        .astype(np.float32)
                    self.K = Parameter(torch.tensor(km), requires_grad=True)
                    vm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                        .astype(np.float32)
                    self.V = Parameter(torch.tensor(vm), requires_grad=True)
                    d = self.nheads * m
                    ln_mlp = np.array([d, 2 * d, 1])
                    self.mlp = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)
                else:
                    # plain concatenation baseline
                    d = D * (T + 1)
                    ln_mlp = np.array([d, 2 * d, 1])
                    self.mlp = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)

            def forward(self, x):
                # H * w
                H = x[:, :-1, :]
                w = torch.unsqueeze(x[:, -1, :], dim=1)
                w = torch.transpose(w, 1, 2)
                # inner products
                if self.mode in ["def", "ind"]:
                    for j in range(self.num_inner):
                        aw = torch.matmul(self.A[j], w)
                        if self.mode == "def":
                            aw = torch.matmul(self.A[j].permute(0, 2, 1), aw)
                        a1 = torch.bmm(H, aw)
                        if j == 0:
                            z = a1
                        else:
                            z = torch.cat([z, a1], dim=1)
                    z = torch.squeeze(z, dim=2)
                elif self.mode == "dot":
                    z = torch.bmm(H, w)
                    z = torch.squeeze(z, dim=2)
                elif self.mode == "mha":
                    w = torch.transpose(w, 1, 2)
                    # print("mha shapes: ", w.shape, self.Q.shape)
                    Qx = torch.transpose(torch.matmul(w, self.Q), 0, 1)
                    HK = torch.transpose(torch.matmul(H, self.K), 0, 1)
                    HV = torch.transpose(torch.matmul(H, self.V), 0, 1)
                    multihead_attn = nn.MultiheadAttention(self.emb_m, self.nheads)
                    attn_output, _ = multihead_attn(Qx, HK, HV)
                    # print("attn shape: ", attn_output.shape)
                    z = torch.squeeze(attn_output, dim=0)
                else:  # concat
                    H = torch.flatten(H, start_dim=1, end_dim=2)
                    w = torch.flatten(w, start_dim=1, end_dim=2)
                    z = torch.cat([H, w], dim=1)
                # obtain probability of a click as a result of MLP
                p = dlrm.DLRM_Net().apply_mlp(z, self.mlp)
                return p

        # trains znet for one epoch and returns its AUC on the test set
        def train_inner(znet):
            loss_fn = torch.nn.BCELoss(reduction="mean")
            # loss_fn = torch.nn.L1Loss(reduction="mean")
            optimizer = torch.optim.Adagrad(znet.parameters(), lr=0.05)
            # optimizer = torch.optim.SGD(znet.parameters(), lr=0.05)
            znet.train()
            nepochs = 1
            for _ in range(nepochs):
                TA = 0
                TS = 0
                for _, (X, y) in enumerate(ztrain_ld):
                    batchSize = X.shape[0]
                    # forward pass
                    Z = znet(X)
                    # loss
                    # print("Z, y: ", Z.shape, y.shape)
                    E = loss_fn(Z, y)
                    # compute loss and accuracy
                    # L = E.detach().cpu().numpy() # numpy array
                    z = Z.detach().cpu().numpy()  # numpy array
                    t = y.detach().cpu().numpy()  # numpy array
                    # rounding t: smooth labels case
                    A = np.sum((np.round(z, 0) == np.round(t, 0)).astype(np.uint16))
                    TA += A
                    TS += batchSize
                    optimizer.zero_grad()
                    # backward pass
                    E.backward(retain_graph=True)
                    # optimizer
                    optimizer.step()
                    # if j % 500 == 0:
                    #     acc = 100.0 * TA / TS
                    #     print("j, acc: ", j, acc)
                    #     TA = 0
                    #     TS = 0
            # fix: np.float removed in NumPy 1.24
            z_final = np.zeros(Nt, dtype=float)
            offset = 0
            znet.eval()
            for _, (X, _) in enumerate(ztest_ld):
                batchSize = X.shape[0]
                Z = znet(X)
                z_final[offset: offset + batchSize] = \
                    np.squeeze(Z.detach().cpu().numpy(), axis=1)
                offset += batchSize
            # E = loss_fn(Z, y)
            # L = E.detach().cpu().numpy() # numpy array
            # loss_net = L
            # print(znet.num_inner, znet.mode, ": ", loss_net)
            auc_net = 100.0 * roc_auc_score(t_test.astype(int), z_final)
            print(znet.num_inner, znet.mode, ": ", auc_net)
            return auc_net

        dim = T
        znet = TBSM_SubNet("dot", 1, D, dim)  # c or c,w
        res1 = train_inner(znet)
        znet = TBSM_SubNet("def", 1, D, dim)  # c or c,w
        res2 = train_inner(znet)
        znet = TBSM_SubNet("def", 4, D, dim)  # c or c,w
        res3 = train_inner(znet)
        znet = TBSM_SubNet("def", 8, D, dim)  # c or c,w
        res4 = train_inner(znet)
        znet = TBSM_SubNet("mha", 1, D, dim)  # c or c,w
        res5 = train_inner(znet)
        auc_results = np.append(auc_results, np.array([[res1, res2, res3, res4, res5]]),
                                axis=0)
    print(auc_results)
    # np.savez_compressed(
    #     'auc_synthetic.npz',
    #     auc_results=auc_results,
    # )
if __name__ == "__main__":
    import argparse

    # DLRM supplies the MLP building blocks (create_mlp / apply_mlp) that the
    # synthetic model reuses, so its checkout must be put on sys.path first.
    parser = argparse.ArgumentParser(description="Synthetic data experiments (TBSM)")
    # path to dlrm
    parser.add_argument("--dlrm-path", type=str, default="")
    args = parser.parse_args()
    if not path.exists(str(args.dlrm_path)):
        sys.exit("Please provide path to DLRM as --dlrm-path")
    sys.path.insert(1, args.dlrm_path)
    import dlrm_s_pytorch as dlrm
    synthetic_experiment()
| 11,465 | 36.106796 | 88 | py |
tbsm | tbsm-main/tbsm_data_pytorch.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
### import packages ###
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
from os import path
import sys
# numpy and scikit-learn
import numpy as np
# pytorch
import torch
# dataset (either synthetic or Taobao)
# dataset (either synthetic or Taobao)
class TBSMDataset():
    """Dataset for TBSM: preprocessed Taobao user-behavior data, or a small
    synthetic stand-in with the same layout.

    On construction, raw data is preprocessed into a compressed .npz file
    (unless one already exists at `pro_data`) and then loaded into:
      self.X_cat -- (3, N, ts_length+1) categorical features (user/item/cat),
      self.X_int -- (1, N, ts_length+1) dense feature (normalized time),
      self.y     -- (N,) 0/1 labels.

    Fix vs original: np.float / np.int (aliases removed in NumPy 1.24) are
    replaced with builtin float / int; behavior is unchanged.
    """

    def __init__(
            self,
            datatype,
            mode,
            ts_length=20,
            points_per_user=4,
            numpy_rand_seed=7,
            raw_path="",
            pro_data="",
            spa_fea_sizes="",
            num_pts=1,  # pts to train or test
    ):
        # save arguments
        # val gets a shifted seed so its sampled points differ from train's
        if mode == "train":
            self.numpy_rand_seed = numpy_rand_seed
        else:
            self.numpy_rand_seed = numpy_rand_seed + 31
        self.mode = mode
        # save dataset parameters
        self.total = num_pts  # number of lines in txt to process
        self.ts_length = ts_length
        self.points_per_user = points_per_user  # pos and neg points per user
        self.spa_fea_sizes = spa_fea_sizes
        self.M = 200  # max history length
        # split the datafile into path and filename
        lstr = raw_path.split("/")
        self.d_path = "/".join(lstr[0:-1]) + "/"
        self.d_file = lstr[-1]
        # preprocess data if needed
        if path.exists(str(pro_data)):
            print("Reading pre-processed data=%s" % (str(pro_data)))
            file = str(pro_data)
        else:
            file = str(pro_data)
            # NOTE(review): np.fromstring with sep= is deprecated; still
            # functional, parses e.g. "987994-4162024-9439" into 3 ints
            levels = np.fromstring(self.spa_fea_sizes, dtype=int, sep="-")
            if datatype == "taobao":
                self.Unum = levels[0]  # 987994 num of users
                self.Inum = levels[1]  # 4162024 num of items
                self.Cnum = levels[2]  # 9439 num of categories
                print("Reading raw data=%s" % (str(raw_path)))
                if self.mode == "test":
                    self.build_taobao_test(
                        raw_path,
                        file,
                    )
                else:
                    self.build_taobao_train_or_val(
                        raw_path,
                        file,
                    )
            elif datatype == "synthetic":
                self.build_synthetic_train_or_val(
                    file,
                )
        # load data
        with np.load(file) as data:
            self.X_cat = data["X_cat"]
            self.X_int = data["X_int"]
            self.y = data["y"]

    # common part between train/val and test generation
    # truncates (if needed) and shuffles data points
    def truncate_and_save(self, out_file, do_shuffle, t, users, items, cats, times, y):
        # truncate. If for some users we didn't generate had too short history
        # we truncate the unused portion of the pre-allocated matrix.
        if t < self.total_out:
            users = users[:t, :]
            items = items[:t, :]
            cats = cats[:t, :]
            times = times[:t, :]
            y = y[:t]
        # shuffle (same permutation applied to every array keeps rows aligned)
        if do_shuffle:
            indices = np.arange(len(y))
            indices = np.random.permutation(indices)
            users = users[indices]
            items = items[indices]
            cats = cats[indices]
            times = times[indices]
            y = y[indices]
        N = len(y)
        X_cat = np.zeros((3, N, self.ts_length + 1), dtype="i4")  # 4 byte int
        # fix: np.float removed in NumPy 1.24
        X_int = np.zeros((1, N, self.ts_length + 1), dtype=float)
        X_cat[0, :, :] = users
        X_cat[1, :, :] = items
        X_cat[2, :, :] = cats
        X_int[0, :, :] = times
        # saving to compressed numpy file
        if not path.exists(out_file):
            np.savez_compressed(
                out_file,
                X_cat=X_cat,
                X_int=X_int,
                y=y,
            )
        return

    # processes raw train or validation into npz format required by training
    # for train data out of each line in raw datafile produces several randomly chosen
    # datapoints, max number of datapoints per user is specified by points_per_user
    # argument, for validation data produces one datapoint per user.
    def build_taobao_train_or_val(self, raw_path, out_file):
        # first pass: count available lines
        with open(str(raw_path)) as f:
            for i, _ in enumerate(f):
                if i % 50000 == 0:
                    print("pre-processing line: ", i)
        self.total = min(self.total, i + 1)
        print("total lines: ", self.total)
        self.total_out = self.total * self.points_per_user * 2  # pos + neg points
        print("Total number of points in raw datafile: ", self.total)
        print("Total number of points in output will be at most: ", self.total_out)
        np.random.seed(self.numpy_rand_seed)
        r_target = np.arange(0, self.M - 1)
        time = np.arange(self.ts_length + 1, dtype=np.int32) / (self.ts_length + 1)
        # time = np.ones(self.ts_length + 1, dtype=np.int32)
        users = np.zeros((self.total_out, self.ts_length + 1), dtype="i4")  # 4 byte int
        items = np.zeros((self.total_out, self.ts_length + 1), dtype="i4")  # 4 byte int
        cats = np.zeros((self.total_out, self.ts_length + 1), dtype="i4")  # 4 byte int
        # fix: np.float removed in NumPy 1.24
        times = np.zeros((self.total_out, self.ts_length + 1), dtype=float)
        y = np.zeros(self.total_out, dtype="i4")  # 4 byte int
        # determine how many datapoints to take from each user based on the length of
        # user behavior sequence
        # ind=0, 1, 2, 3,... t < 10, 20, 30, 40, 50, 60, ...
        k = 20
        # fix: np.int removed in NumPy 1.24
        regime = np.zeros(k, dtype=int)
        regime[1], regime[2], regime[3] = 1, 3, 6
        for j in range(4, k):
            regime[j] = self.points_per_user
        if self.mode == "val":
            self.points_per_user = 1
        for j in range(k):
            regime[j] = np.min([regime[j], self.points_per_user])
        last = self.M - 1  # max index of last item
        # try to generate the desired number of points (time series) per each user.
        # if history is short it may not succeed to generate sufficiently different
        # time series for a particular user.
        t, t_pos, t_neg, t_short = 0, 0, 0, 0
        with open(str(raw_path)) as f:
            for i, line in enumerate(f):
                if i % 1000 == 0:
                    print("processing line: ", i, t, t_pos, t_neg, t_short)
                if i >= self.total:
                    break
                units = line.strip().split("\t")
                item_hist_list = units[4].split(",")
                cate_hist_list = units[5].split(",")
                neg_item_hist_list = units[6].split(",")
                neg_cate_hist_list = units[7].split(",")
                # ids are remapped into disjoint ranges; subtracting the
                # offsets gives contiguous per-feature indices
                user = np.array(np.maximum(np.int32(units[0]) - self.Inum, 0),
                                dtype=np.int32)
                # y[i] = np.int32(units[3])
                items_ = np.array(
                    list(map(lambda x: np.maximum(np.int32(x), 0), item_hist_list)),
                    dtype=np.int32
                )
                cats_ = np.array(
                    list(map(lambda x: np.maximum(np.int32(x)
                                                  - self.Inum - self.Unum, 0),
                             cate_hist_list)), dtype=np.int32
                )
                neg_items_ = np.array(
                    list(map(lambda x: np.maximum(np.int32(x), 0),
                             neg_item_hist_list)),
                    dtype=np.int32
                )
                neg_cats_ = np.array(
                    list(map(lambda x: np.maximum(np.int32(x)
                                                  - self.Inum - self.Unum, 0),
                             neg_cate_hist_list)),
                    dtype=np.int32
                )
                # select datapoints
                first = np.argmax(items_ > 0)
                ind = int((last - first) // 10)  # index into regime array
                # pos: a window of ts_length history items plus the true next item
                for _ in range(regime[ind]):
                    a1 = min(first + self.ts_length, last - 1)
                    end = np.random.randint(a1, last)
                    indices = np.arange(end - self.ts_length, end + 1)
                    if items_[indices[0]] == 0:
                        t_short += 1
                    items[t] = items_[indices]
                    cats[t] = cats_[indices]
                    users[t] = np.full(self.ts_length + 1, user)
                    times[t] = time
                    y[t] = 1
                    # check
                    if np.any(users[t] < 0) or np.any(items[t] < 0) \
                            or np.any(cats[t] < 0):
                        sys.exit("Categorical feature less than zero after \
                            processing. Aborting...")
                    t += 1
                    t_pos += 1
                # neg: same history window, but a randomly sampled negative
                # item/category as the final (target) position
                for _ in range(regime[ind]):
                    a1 = min(first + self.ts_length - 1, last - 1)
                    end = np.random.randint(a1, last)
                    indices = np.arange(end - self.ts_length + 1, end + 1)
                    if items_[indices[0]] == 0:
                        t_short += 1
                    items[t, :-1] = items_[indices]
                    cats[t, :-1] = cats_[indices]
                    neg_indices = np.random.choice(r_target, 1,
                                                   replace=False)  # random final item
                    items[t, -1] = neg_items_[neg_indices]
                    cats[t, -1] = neg_cats_[neg_indices]
                    users[t] = np.full(self.ts_length + 1, user)
                    times[t] = time
                    y[t] = 0
                    # check
                    if np.any(users[t] < 0) or np.any(items[t] < 0) \
                            or np.any(cats[t] < 0):
                        sys.exit("Categorical feature less than zero after \
                            processing. Aborting...")
                    t += 1
                    t_neg += 1
        print("total points, pos points, neg points: ", t, t_pos, t_neg)
        self.truncate_and_save(out_file, True, t, users, items, cats, times, y)
        return

    # processes raw test datafile into npz format required to be used by
    # inference step, produces one datapoint per user by taking last ts-length items
    def build_taobao_test(self, raw_path, out_file):
        # first pass: count available lines
        with open(str(raw_path)) as f:
            for i, _ in enumerate(f):
                if i % 50000 == 0:
                    print("pre-processing line: ", i)
        self.total = i + 1
        self.total_out = self.total  # pos + neg points
        print("ts_length: ", self.ts_length)
        print("Total number of points in raw datafile: ", self.total)
        print("Total number of points in output will be at most: ", self.total_out)
        time = np.arange(self.ts_length + 1, dtype=np.int32) / (self.ts_length + 1)
        users = np.zeros((self.total_out, self.ts_length + 1), dtype="i4")  # 4 byte int
        items = np.zeros((self.total_out, self.ts_length + 1), dtype="i4")  # 4 byte int
        cats = np.zeros((self.total_out, self.ts_length + 1), dtype="i4")  # 4 byte int
        # fix: np.float removed in NumPy 1.24
        times = np.zeros((self.total_out, self.ts_length + 1), dtype=float)
        y = np.zeros(self.total_out, dtype="i4")  # 4 byte int
        # try to generate the desired number of points (time series) per each user.
        # if history is short it may not succeed to generate sufficiently different
        # time series for a particular user.
        t, t_pos, t_neg = 0, 0, 0
        with open(str(raw_path)) as f:
            for i, line in enumerate(f):
                if i % 1000 == 0:
                    print("processing line: ", i, t, t_pos, t_neg)
                if i >= self.total:
                    break
                units = line.strip().split("\t")
                item_hist_list = units[4].split(",")
                cate_hist_list = units[5].split(",")
                user = np.array(np.maximum(np.int32(units[0]) - self.Inum, 0),
                                dtype=np.int32)
                y[t] = np.int32(units[3])
                items_ = np.array(
                    list(map(lambda x: np.maximum(np.int32(x), 0), item_hist_list)),
                    dtype=np.int32
                )
                cats_ = np.array(
                    list(map(lambda x: np.maximum(np.int32(x)
                                                  - self.Inum - self.Unum, 0),
                             cate_hist_list)), dtype=np.int32
                )
                # get pts: one datapoint per user, last ts_length+1 events
                items[t] = items_[-(self.ts_length + 1):]
                cats[t] = cats_[-(self.ts_length + 1):]
                users[t] = np.full(self.ts_length + 1, user)
                times[t] = time
                # check
                if np.any(users[t] < 0) or np.any(items[t] < 0) \
                        or np.any(cats[t] < 0):
                    sys.exit("Categorical feature less than zero after \
                        processing. Aborting...")
                if y[t] == 1:
                    t_pos += 1
                else:
                    t_neg += 1
                t += 1
        print("total points, pos points, neg points: ", t, t_pos, t_neg)
        self.truncate_and_save(out_file, False, t, users, items, cats, times, y)
        return

    # builds small synthetic data mimicking the structure of taobao data
    def build_synthetic_train_or_val(self, out_file):
        np.random.seed(123)
        fea_sizes = np.fromstring(self.spa_fea_sizes, dtype=int, sep="-")
        maxval = np.min(fea_sizes)
        num_s = len(fea_sizes)
        X_cat = np.random.randint(maxval, size=(num_s, self.total, self.ts_length + 1),
                                  dtype="i4")  # 4 byte int
        X_int = np.random.uniform(0, 1, size=(1, self.total, self.ts_length + 1))
        y = np.random.randint(0, 2, self.total, dtype="i4")  # 4 byte int
        # saving to compressed numpy file
        if not path.exists(out_file):
            np.savez_compressed(
                out_file,
                X_cat=X_cat,
                X_int=X_int,
                y=y,
            )
        return

    def __getitem__(self, index):
        if isinstance(index, slice):
            return [
                self[idx] for idx in range(
                    index.start or 0, index.stop or len(self), index.step or 1
                )
            ]
        return self.X_cat[:, index, :], self.X_int[:, index, :], self.y[index]

    def __len__(self):
        return len(self.y)
# defines transform to be performed during each call to batch,
# used by loader
def collate_wrapper_tbsm(list_of_tuples):
    """Collate TBSMDataset samples into the structure DLRM-style nets expect.

    Input: a list of (X_cat, X_int, y) tuples, where X_cat is
    (num_cat_fea, ts_len) and X_int is (num_den_fea, ts_len).
    Output:
      X    -- list of ts_len dense tensors, each (batch, 1),
      lS_o -- per timestep, per categorical feature offset tensors 0..batch-1,
      lS_i -- per timestep, per categorical feature index tensors (batch,),
      T    -- (batch, 1) float labels.
    """
    cat_rows, den_rows, labels = zip(*list_of_tuples)
    all_cat = torch.tensor(cat_rows, dtype=torch.long)
    all_int = torch.tensor(den_rows, dtype=torch.float)
    # print("shapes:", all_cat.shape, all_int.shape)
    batch, n_cat, ts_len = all_cat.shape
    n_den = all_int.shape[1]
    all_int = all_int.reshape(batch, n_den * ts_len)
    X = []
    lS_i = []
    lS_o = []
    # transform data into the form used in dlrm nn: one entry per timestep
    for step in range(ts_len):
        per_fea = [all_cat[:, f, step] for f in range(n_cat)]
        lS_i.append(per_fea)
        lS_o.append([torch.tensor(range(batch)) for _ in per_fea])
        X.append(all_int[:, step].view(-1, 1))
    T = torch.tensor(labels, dtype=torch.float32).view(-1, 1)
    return X, lS_o, lS_i, T
# Builds the dataset plus a torch DataLoader for one split; used by the main
# training loop and by inference.
def make_tbsm_data_and_loader(args, mode):
    """Return (loader, num_points) for the "train", "val", or test split."""
    if mode == "train":
        split = (args.raw_train_file, args.pro_train_file,
                 args.num_train_pts, args.mini_batch_size, True)
    elif mode == "val":
        # Validation reuses the raw training file with its own processed file.
        split = (args.raw_train_file, args.pro_val_file,
                 args.num_val_pts, 25000, True)
    else:
        # Test: no shuffling, single point count.
        split = (args.raw_test_file, args.pro_test_file, 1, 25000, False)
    raw_path, proc_path, n_points, batch, shuffle = split
    dataset = TBSMDataset(
        args.datatype,
        mode,
        args.ts_length,
        args.points_per_user,
        args.numpy_rand_seed,
        raw_path,
        proc_path,
        args.arch_embedding_size,
        n_points,
    )
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch,
        num_workers=0,
        collate_fn=collate_wrapper_tbsm,
        shuffle=shuffle,
    )
    return loader, len(dataset)
| 16,772 | 36.52349 | 88 | py |
tbsm | tbsm-main/tools/taobao_prepare.py | # Copyright (c) 2019 UIC-Paper
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Source: https://github.com/UIC-Paper/MIMN
import cPickle as pkl
import pandas as pd
import random
import numpy as np
# Input/output locations for the Taobao user-behavior preprocessing pipeline.
RAW_DATA_FILE = './data/taobao_data/UserBehavior.csv'
DATASET_PKL = './data/taobao_data/dataset.pkl'
Test_File = "./data/taobao_data/taobao_test.txt"
Train_File = "./data/taobao_data/taobao_train.txt"
# NOTE(review): output handles are opened at import time and never closed
# explicitly; pickling to a text-mode ('w') handle is a Python 2 idiom --
# this module imports cPickle and is written for Python 2.
Train_handle = open(Train_File, 'w')
Test_handle = open(Test_File, 'w')
Feature_handle = open("./data/taobao_data/taobao_feature.pkl",'w')
# Each sample's item/category history is padded or truncated to this length.
MAX_LEN_ITEM = 200
def to_df(file_name):
    """Load a raw UserBehavior CSV into a DataFrame.

    The file has no header; columns are user id, item id, category id,
    behavior tag, and timestamp.
    """
    # Bug fix: read from the given path. Previously this ignored file_name
    # and always read the module-level RAW_DATA_FILE, making the parameter
    # dead and the function untestable against other files. The only caller
    # (main) passes RAW_DATA_FILE, so behavior there is unchanged.
    df = pd.read_csv(file_name, header=None, names=['uid', 'iid', 'cid', 'btag', 'time'])
    return df
def remap(df):
    """Re-encode iid/uid/cid/btag values into one contiguous id space.

    Ids are assigned in blocks: items first (0..I-1), then users, then
    categories, then behavior tags. Mutates *df* in place and returns
    (df, item_count, feature_size), where feature_size reserves one extra
    slot for the unknown target btag.
    """
    counts = {}
    offset = 0
    # Assign each column's sorted unique keys the next contiguous id range.
    for col in ('iid', 'uid', 'cid', 'btag'):
        keys = sorted(df[col].unique().tolist())
        mapping = {key: offset + pos for pos, key in enumerate(keys)}
        df[col] = df[col].map(mapping)
        counts[col] = len(keys)
        offset += len(keys)
    print(counts['iid'], counts['uid'], counts['cid'], counts['btag'])
    # +1 reserves an id for the unknown target btag.
    return df, counts['iid'], offset + 1
def gen_user_item_group(df, item_cnt, feature_size):
    """Group the interaction log chronologically per user and per item.

    ``item_cnt`` and ``feature_size`` are accepted for call-site
    compatibility but are not used here.
    """
    by_user = df.sort_values(['uid', 'time']).groupby('uid')
    by_item = df.sort_values(['iid', 'time']).groupby('iid')
    print("group completed")
    return by_user, by_item
def gen_dataset(user_df, item_df, item_cnt, feature_size, dataset_pkl,
                max_len_item=None):
    """Build tab-separated train/test sample lines from per-user histories.

    Each line is: uid \\t target_item \\t target_cate \\t label \\t
    comma-joined item history \\t comma-joined category history, where the
    histories are zero-padded / truncated to ``max_len_item`` entries.
    Users whose last interaction falls after the 70% time quantile go to the
    test split; about half of all samples get a random negative target
    (label 0). Both lists are truncated to a multiple of 256 and the train
    list is shuffled.

    ``dataset_pkl`` is unused and kept only for call-site compatibility.
    ``max_len_item`` defaults to the module-level MAX_LEN_ITEM.
    """
    if max_len_item is None:
        max_len_item = MAX_LEN_ITEM
    train_sample_list = []
    test_sample_list = []
    # get each user's last touch point time
    print(len(user_df))
    user_last_touch_time = []
    for uid, hist in user_df:
        user_last_touch_time.append(hist['time'].tolist()[-1])
    print("get user last touch time completed")
    user_last_touch_time_sorted = sorted(user_last_touch_time)
    # The 70%-quantile of last-touch times separates train from test users.
    split_time = user_last_touch_time_sorted[int(len(user_last_touch_time_sorted) * 0.7)]
    cnt = 0
    for uid, hist in user_df:
        cnt += 1
        print(cnt)
        item_hist = hist['iid'].tolist()
        cate_hist = hist['cid'].tolist()
        btag_hist = hist['btag'].tolist()
        target_item_time = hist['time'].tolist()[-1]
        # Positive target: the user's last interaction. Its btag is unknown
        # at prediction time, so the reserved id ``feature_size`` is used.
        target_item = item_hist[-1]
        target_item_cate = cate_hist[-1]
        target_item_btag = feature_size
        label = 1
        test = (target_item_time > split_time)
        # Negative sampling: with probability 1/2 replace the target with a
        # random different item and flip the label to 0.
        neg = random.randint(0, 1)
        if neg == 1:
            label = 0
            while target_item == item_hist[-1]:
                target_item = random.randint(0, item_cnt - 1)
            target_item_cate = item_df.get_group(target_item)['cid'].tolist()[0]
            target_item_btag = feature_size
        # The item-history part of the sample: all but the last interaction,
        # plus the (possibly negative) target as the final entry.
        item_part = []
        for i in range(len(item_hist) - 1):
            item_part.append([uid, item_hist[i], cate_hist[i], btag_hist[i]])
        item_part.append([uid, target_item, target_item_cate, target_item_btag])
        # Pad short histories with zeros on the left; keep only the most
        # recent max_len_item entries of long ones.
        if len(item_part) <= max_len_item:
            item_part_pad = [[0] * 4] * (max_len_item - len(item_part)) + item_part
        else:
            item_part_pad = item_part[len(item_part) - max_len_item:len(item_part)]
        # Serialize the sample (same format for both splits; the original
        # duplicated this code in the test and train branches).
        item_list = []
        cat_list = []
        for entry in item_part_pad:
            item_list.append(entry[1])
            cat_list.append(entry[2])
        sample = (str(uid) + "\t" + str(target_item) + "\t" + str(target_item_cate)
                  + "\t" + str(label) + "\t" + ",".join(map(str, item_list))
                  + "\t" + ",".join(map(str, cat_list)) + "\n")
        if test:
            test_sample_list.append(sample)
        else:
            train_sample_list.append(sample)
    # Truncate to a multiple of 256 (batch alignment). Bug fix: use integer
    # division -- on Python 3 a plain '/' yields a float, and slicing a list
    # with a float raises TypeError.
    train_sample_length_quant = len(train_sample_list) // 256 * 256
    test_sample_length_quant = len(test_sample_list) // 256 * 256
    print("length", len(train_sample_list))
    train_sample_list = train_sample_list[:train_sample_length_quant]
    test_sample_list = test_sample_list[:test_sample_length_quant]
    random.shuffle(train_sample_list)
    print("length", len(train_sample_list))
    return train_sample_list, test_sample_list
def produce_neg_item_hist_with_cate(train_file, test_file):
    # Appends a negative (item, category) history to every sample line and
    # writes the result to the module-level Train_handle / Test_handle files.
    #
    # NOTE(review): this function relies on Python 2 semantics -- ``zip``
    # must return a list (``len(hist_list)`` and repeated iteration below)
    # and ``dict.keys()`` must return a list for np.random.choice. On
    # Python 3 it raises TypeError; the module imports cPickle, so Python 2
    # is the intended runtime.
    item_dict = {}
    sample_count = 0
    hist_seq = 0
    # First pass: collect every (item, cate) pair seen in any history, keyed
    # by its str() form, and count the total number of samples.
    for line in train_file:
        units = line.strip().split("\t")
        item_hist_list = units[4].split(",")
        cate_hist_list = units[5].split(",")
        hist_list = zip(item_hist_list, cate_hist_list)
        hist_seq = len(hist_list)
        sample_count += 1
        for item in hist_list:
            item_dict.setdefault(str(item),0)
    for line in test_file:
        units = line.strip().split("\t")
        item_hist_list = units[4].split(",")
        cate_hist_list = units[5].split(",")
        hist_list = zip(item_hist_list, cate_hist_list)
        hist_seq = len(hist_list)
        sample_count += 1
        for item in hist_list:
            item_dict.setdefault(str(item),0)
    # Drop the zero-padding pair so it is never sampled as a negative.
    # NOTE(review): assumes every history contains at least one padded entry
    # ("('0', '0')"); KeyError otherwise -- confirm against gen_dataset.
    del(item_dict["('0', '0')"])
    # Draw hist_seq+20 candidate negatives per sample in one vectorized call;
    # the +20 slack covers candidates rejected for appearing in the history.
    neg_array = np.random.choice(np.array(item_dict.keys()), (sample_count, hist_seq+20))
    neg_list = neg_array.tolist()
    sample_count = 0
    # Second pass: for each sample keep the first hist_seq candidates that do
    # not occur in its own history, and append them to the output line.
    for line in train_file:
        units = line.strip().split("\t")
        item_hist_list = units[4].split(",")
        cate_hist_list = units[5].split(",")
        hist_list = zip(item_hist_list, cate_hist_list)
        hist_seq = len(hist_list)
        neg_hist_list = []
        for item in neg_list[sample_count]:
            # eval() turns the stringified tuple back into ('item', 'cate').
            # Safe here only because the strings are produced above from our
            # own data, never from external input.
            item = eval(item)
            if item not in hist_list:
                neg_hist_list.append(item)
            if len(neg_hist_list) == hist_seq:
                break
        sample_count += 1
        neg_item_list, neg_cate_list = zip(*neg_hist_list)
        Train_handle.write(line.strip() + "\t" + ",".join(neg_item_list) + "\t" + ",".join(neg_cate_list) + "\n" )
    for line in test_file:
        units = line.strip().split("\t")
        item_hist_list = units[4].split(",")
        cate_hist_list = units[5].split(",")
        hist_list = zip(item_hist_list, cate_hist_list)
        hist_seq = len(hist_list)
        neg_hist_list = []
        for item in neg_list[sample_count]:
            item = eval(item)
            if item not in hist_list:
                neg_hist_list.append(item)
            if len(neg_hist_list) == hist_seq:
                break
        sample_count += 1
        neg_item_list, neg_cate_list = zip(*neg_hist_list)
        Test_handle.write(line.strip() + "\t" + ",".join(neg_item_list) + "\t" + ",".join(neg_cate_list) + "\n" )
def main():
    """Run the full Taobao preprocessing pipeline: load the raw log, remap
    ids, group by user/item, build train/test samples, and attach negative
    histories."""
    behavior_df = to_df(RAW_DATA_FILE)
    behavior_df, n_items, feat_size = remap(behavior_df)
    print("feature_size", n_items, feat_size)
    # Persist the total feature-id count (+1 slack) for downstream configs.
    feature_total_num = feat_size + 1
    pkl.dump(feature_total_num, Feature_handle)
    by_user, by_item = gen_user_item_group(behavior_df, n_items, feat_size)
    train_samples, test_samples = gen_dataset(by_user, by_item, n_items, feat_size, DATASET_PKL)
    produce_neg_item_hist_with_cate(train_samples, test_samples)
# Entry point: run the whole preprocessing pipeline when executed directly.
if __name__ == '__main__':
    main()
| 8,508 | 36.484581 | 204 | py |
rllab | rllab-master/setup.py | # setup.py
from setuptools import setup,find_packages
# Install only the rllab.* packages discovered in the source tree.
rllab_packages = [pkg for pkg in find_packages() if pkg.startswith('rllab')]

setup(
    name='rllab',
    version='0.1.0',
    packages=rllab_packages,
)
| 205 | 19.6 | 52 | py |
rllab | rllab-master/examples/trpo_cartpole_recurrent.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_gru_policy import GaussianGRUPolicy
from rllab.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer, FiniteDifferenceHvp
from rllab.misc.instrument import run_experiment_lite
def run_task(*_):
    """Train a recurrent (GRU) Gaussian policy on cart-pole with TRPO."""
    cartpole = normalize(CartpoleEnv())
    gru_policy = GaussianGRUPolicy(env_spec=cartpole.spec)
    linear_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
    # Finite-difference Hessian-vector products are used for the conjugate
    # gradient optimizer here.
    hvp = FiniteDifferenceHvp(base_eps=1e-5)
    trainer = TRPO(
        env=cartpole,
        policy=gru_policy,
        baseline=linear_baseline,
        discount=0.99,
        step_size=0.01,
        batch_size=4000,
        max_path_length=100,
        n_itr=10,
        optimizer=ConjugateGradientOptimizer(hvp_approach=hvp),
    )
    trainer.train()
# Launch run_task through rllab's experiment runner: one sampler worker,
# fixed seed for reproducibility.
run_experiment_lite(
    run_task,
    n_parallel=1,
    seed=1,
)
| 1,003 | 25.421053 | 105 | py |
rllab | rllab-master/examples/cluster_gym_mujoco_demo.py | from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.algos.trpo import TRPO
from rllab.misc.instrument import run_experiment_lite
from rllab.envs.gym_env import GymEnv
import sys
from rllab.misc.instrument import VariantGenerator, variant
class VG(VariantGenerator):
    # Hyperparameter grid: the cross product of step_size and seed below
    # yields 3 x 5 = 15 experiment variants.
    @variant
    def step_size(self):
        # TRPO KL step sizes to sweep over.
        return [0.01, 0.05, 0.1]
    @variant
    def seed(self):
        # Random seeds for repeated runs of each step size.
        return [1, 11, 21, 31, 41]
def run_task(vv):
    """Train TRPO on HalfCheetah-v1 using the variant's step size."""
    cheetah = TfEnv(normalize(GymEnv('HalfCheetah-v1', record_video=False, record_log=False)))
    mlp_policy = GaussianMLPPolicy(
        name="policy",
        env_spec=cheetah.spec,
        # Two hidden layers of 32 units each.
        hidden_sizes=(32, 32),
    )
    feature_baseline = LinearFeatureBaseline(env_spec=cheetah.spec)
    TRPO(
        env=cheetah,
        policy=mlp_policy,
        baseline=feature_baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=40,
        discount=0.99,
        step_size=vv["step_size"],
        # plot=True,  # enable together with plot=True in run_experiment_lite
    ).train()
# Launch one EC2 experiment per variant in the grid; sys.exit() stops the
# launcher process once every variant has been submitted.
variants = VG().variants()
for v in variants:
    run_experiment_lite(
        run_task,
        exp_prefix="first_exp",
        # Number of parallel workers for sampling
        n_parallel=1,
        # Only keep the snapshot parameters for the last iteration
        snapshot_mode="last",
        # Specifies the seed for the experiment. If this is not provided, a random seed
        # will be used
        seed=v["seed"],
        # mode="local",
        mode="ec2",
        variant=v,
        # plot=True,
        # terminate_machine=False,
    )
sys.exit()
| 1,919 | 25.30137 | 93 | py |
rllab | rllab-master/examples/trpo_cartpole.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
# Minimal TRPO example on the Box2D cart-pole task.
env = normalize(CartpoleEnv())

# Gaussian MLP policy with two hidden layers of 32 units each.
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

# Linear feature baseline for variance reduction.
baseline = LinearFeatureBaseline(env_spec=env.spec)

algo = TRPO(
    env=env,
    policy=policy,
    baseline=baseline,
    discount=0.99,
    step_size=0.01,
    batch_size=4000,
    max_path_length=100,
    n_itr=40,
)
algo.train()
| 713 | 24.5 | 89 | py |
rllab | rllab-master/examples/trpo_cartpole_pickled.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
def run_task(*_):
    """Long-running (1000 iteration) TRPO training on cart-pole."""
    cartpole = normalize(CartpoleEnv())
    # Two-hidden-layer (32x32) Gaussian MLP policy.
    mlp_policy = GaussianMLPPolicy(env_spec=cartpole.spec, hidden_sizes=(32, 32))
    linear_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
    trainer = TRPO(
        env=cartpole,
        policy=mlp_policy,
        baseline=linear_baseline,
        discount=0.99,
        step_size=0.01,
        batch_size=4000,
        max_path_length=100,
        n_itr=1000,
        # plot=True,  # enable together with plot=True in run_experiment_lite
    )
    trainer.train()
# Launch run_task through rllab's experiment runner (two sampler workers).
run_experiment_lite(
    run_task,
    # Number of parallel workers for sampling
    n_parallel=2,
    # Only keep the snapshot parameters for the last iteration
    snapshot_mode="last",
    # Specifies the seed for the experiment. If this is not provided, a random seed
    # will be used
    seed=1,
    #plot=True
)
| 1,287 | 27 | 93 | py |
rllab | rllab-master/examples/vpg_2.py |
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.envs.normalized_env import normalize
import numpy as np
import theano
import theano.tensor as TT
from lasagne.updates import adam
# Hand-rolled vanilla policy gradient (REINFORCE) with a linear baseline,
# written directly against Theano; no rllab algorithm class is used.
# normalize() makes sure that the actions for the environment lies
# within the range [-1, 1] (only works for environments with continuous actions)
env = normalize(CartpoleEnv())
# Initialize a neural network policy with a single hidden layer of 8 hidden units
policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,))
# Initialize a linear baseline estimator using default hand-crafted features
baseline = LinearFeatureBaseline(env.spec)
# We will collect 100 trajectories per iteration
N = 100
# Each trajectory will have at most 100 time steps
T = 100
# Number of iterations
n_itr = 100
# Set the discount factor for the problem
discount = 0.99
# Learning rate for the gradient update
learning_rate = 0.1
# Construct the computation graph
# Create a Theano variable for storing the observations
# We could have simply written `observations_var = TT.matrix('observations')` instead for this example. However,
# doing it in a slightly more abstract way allows us to delegate to the environment for handling the correct data
# type for the variable. For instance, for an environment with discrete observations, we might want to use integer
# types if the observations are represented as one-hot vectors.
observations_var = env.observation_space.new_tensor_variable(
    'observations',
    # It should have 1 extra dimension since we want to represent a list of observations
    extra_dims=1
)
actions_var = env.action_space.new_tensor_variable(
    'actions',
    extra_dims=1
)
advantages_var = TT.vector('advantages')
# policy.dist_info_sym returns a dictionary, whose values are symbolic expressions for quantities related to the
# distribution of the actions. For a Gaussian policy, it contains the mean and (log) standard deviation.
dist_info_vars = policy.dist_info_sym(observations_var)
# policy.distribution returns a distribution object under rllab.distributions. It contains many utilities for computing
# distribution-related quantities, given the computed dist_info_vars. Below we use dist.log_likelihood_sym to compute
# the symbolic log-likelihood. For this example, the corresponding distribution is an instance of the class
# rllab.distributions.DiagonalGaussian
dist = policy.distribution
# Note that we negate the objective, since most optimizers assume a
# minimization problem
surr = - TT.mean(dist.log_likelihood_sym(actions_var, dist_info_vars) * advantages_var)
# Get the list of trainable parameters.
params = policy.get_params(trainable=True)
grads = theano.grad(surr, params)
f_train = theano.function(
    inputs=[observations_var, actions_var, advantages_var],
    outputs=None,
    updates=adam(grads, params, learning_rate=learning_rate),
    allow_input_downcast=True
)
for _ in range(n_itr):
    paths = []
    for _ in range(N):
        observations = []
        actions = []
        rewards = []
        observation = env.reset()
        for _ in range(T):
            # policy.get_action() returns a pair of values. The second one returns a dictionary, whose values contains
            # sufficient statistics for the action distribution. It should at least contain entries that would be
            # returned by calling policy.dist_info(), which is the non-symbolic analog of policy.dist_info_sym().
            # Storing these statistics is useful, e.g., when forming importance sampling ratios. In our case it is
            # not needed.
            action, _ = policy.get_action(observation)
            # Recall that the last entry of the tuple stores diagnostic information about the environment. In our
            # case it is not needed.
            next_observation, reward, terminal, _ = env.step(action)
            observations.append(observation)
            actions.append(action)
            rewards.append(reward)
            observation = next_observation
            if terminal:
                # Finish rollout if terminal state reached
                break
        # We need to compute the empirical return for each time step along the
        # trajectory
        path = dict(
            observations=np.array(observations),
            actions=np.array(actions),
            rewards=np.array(rewards),
        )
        path_baseline = baseline.predict(path)
        advantages = []
        returns = []
        return_so_far = 0
        # Walk the trajectory backwards to accumulate discounted returns; the
        # advantage is the return minus the baseline's value prediction.
        for t in range(len(rewards) - 1, -1, -1):
            return_so_far = rewards[t] + discount * return_so_far
            returns.append(return_so_far)
            advantage = return_so_far - path_baseline[t]
            advantages.append(advantage)
        # The advantages are stored backwards in time, so we need to revert it
        advantages = np.array(advantages[::-1])
        # And we need to do the same thing for the list of returns
        returns = np.array(returns[::-1])
        # Per-path advantage normalization (zero mean, unit std) stabilizes
        # the gradient scale; 1e-8 guards against a zero std.
        advantages = (advantages - np.mean(advantages)) / (np.std(advantages) + 1e-8)
        path["advantages"] = advantages
        path["returns"] = returns
        paths.append(path)
    # Refit the baseline on this batch AFTER its predictions were used above,
    # so each iteration's advantages come from the previous fit.
    baseline.fit(paths)
    observations = np.concatenate([p["observations"] for p in paths])
    actions = np.concatenate([p["actions"] for p in paths])
    advantages = np.concatenate([p["advantages"] for p in paths])
    f_train(observations, actions, advantages)
    print('Average Return:', np.mean([sum(p["rewards"]) for p in paths]))
| 5,669 | 40.086957 | 119 | py |
rllab | rllab-master/examples/cluster_demo.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
import sys
def run_task(v):
    """Train TRPO on cart-pole using the step size from variant dict *v*."""
    cartpole = normalize(CartpoleEnv())
    # Two-hidden-layer (32x32) Gaussian MLP policy.
    mlp_policy = GaussianMLPPolicy(env_spec=cartpole.spec, hidden_sizes=(32, 32))
    linear_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
    TRPO(
        env=cartpole,
        policy=mlp_policy,
        baseline=linear_baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=40,
        discount=0.99,
        step_size=v["step_size"],
        # plot=True,  # enable together with plot=True in run_experiment_lite
    ).train()
# Launch one EC2 experiment per (step_size, seed) pair; sys.exit() stops the
# launcher process once all 15 experiments are submitted.
for step_size in [0.01, 0.05, 0.1]:
    for seed in [1, 11, 21, 31, 41]:
        run_experiment_lite(
            run_task,
            exp_prefix="first_exp",
            # Number of parallel workers for sampling
            n_parallel=1,
            # Only keep the snapshot parameters for the last iteration
            snapshot_mode="last",
            # Specifies the seed for the experiment. If this is not provided, a random seed
            # will be used
            seed=seed,
            # mode="local",
            mode="ec2",
            variant=dict(step_size=step_size, seed=seed)
            # plot=True,
            # terminate_machine=False,
        )
sys.exit()
| 1,682 | 29.6 | 93 | py |
rllab | rllab-master/examples/trpo_gym_cartpole.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
def run_task(*_):
    """Train TRPO with a categorical policy on Gym's CartPole-v0."""
    # CartPole-v0 has a Discrete action space, so a CategoricalMLPPolicy is
    # the right choice; environments with a Box action space need a
    # GaussianMLPPolicy instead (see the trpo_gym_pendulum.py example).
    cartpole = normalize(GymEnv("CartPole-v0"))
    cat_policy = CategoricalMLPPolicy(env_spec=cartpole.spec, hidden_sizes=(32, 32))
    linear_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
    trainer = TRPO(
        env=cartpole,
        policy=cat_policy,
        baseline=linear_baseline,
        batch_size=4000,
        # Cap rollouts at the Gym-provided episode horizon.
        max_path_length=cartpole.horizon,
        n_itr=50,
        discount=0.99,
        step_size=0.01,
        # plot=True,  # enable together with plot=True in run_experiment_lite
    )
    trainer.train()
# Launch run_task through rllab's experiment runner (serial sampling).
run_experiment_lite(
    run_task,
    # Number of parallel workers for sampling
    n_parallel=1,
    # Only keep the snapshot parameters for the last iteration
    snapshot_mode="last",
    # Specifies the seed for the experiment. If this is not provided, a random seed
    # will be used
    seed=1,
    # plot=True,
)
| 1,597 | 30.96 | 93 | py |
rllab | rllab-master/examples/trpo_gym_pendulum.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
def run_task(*_):
    """Train TRPO with a Gaussian policy on Gym's Pendulum-v0."""
    # Pendulum-v0 has a Box action space, so a GaussianMLPPolicy fits;
    # Discrete action spaces need a CategoricalMLPPolicy instead (see the
    # trpo_gym_cartpole.py example).
    pendulum = normalize(GymEnv("Pendulum-v0"))
    mlp_policy = GaussianMLPPolicy(env_spec=pendulum.spec, hidden_sizes=(32, 32))
    linear_baseline = LinearFeatureBaseline(env_spec=pendulum.spec)
    trainer = TRPO(
        env=pendulum,
        policy=mlp_policy,
        baseline=linear_baseline,
        batch_size=4000,
        # Cap rollouts at the Gym-provided episode horizon.
        max_path_length=pendulum.horizon,
        n_itr=50,
        discount=0.99,
        step_size=0.01,
        # plot=True,  # enable together with plot=True in run_experiment_lite
    )
    trainer.train()
# Launch run_task through rllab's experiment runner (serial sampling).
run_experiment_lite(
    run_task,
    # Number of parallel workers for sampling
    n_parallel=1,
    # Only keep the snapshot parameters for the last iteration
    snapshot_mode="last",
    # Specifies the seed for the experiment. If this is not provided, a random seed
    # will be used
    seed=1,
    # plot=True,
)
| 1,582 | 31.306122 | 98 | py |
rllab | rllab-master/examples/trpo_point.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from examples.point_env import PointEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
# Train TRPO with default hyperparameters on the toy 2-D point environment.
env = normalize(PointEnv())

# Gaussian MLP policy with the library's default architecture.
policy = GaussianMLPPolicy(env_spec=env.spec)

baseline = LinearFeatureBaseline(env_spec=env.spec)

algo = TRPO(env=env, policy=policy, baseline=baseline)
algo.train()
| 478 | 25.611111 | 73 | py |
rllab | rllab-master/examples/ddpg_cartpole.py | from rllab.algos.ddpg import DDPG
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.exploration_strategies.ou_strategy import OUStrategy
from rllab.policies.deterministic_mlp_policy import DeterministicMLPPolicy
from rllab.q_functions.continuous_mlp_q_function import ContinuousMLPQFunction
def run_task(*_):
    """Train DDPG on cart-pole with Ornstein-Uhlenbeck exploration noise."""
    cartpole = normalize(CartpoleEnv())
    # Deterministic actor with two 32-unit hidden layers.
    actor = DeterministicMLPPolicy(env_spec=cartpole.spec, hidden_sizes=(32, 32))
    # OU process supplies temporally correlated exploration noise.
    exploration = OUStrategy(env_spec=cartpole.spec)
    critic = ContinuousMLPQFunction(env_spec=cartpole.spec)
    agent = DDPG(
        env=cartpole,
        policy=actor,
        es=exploration,
        qf=critic,
        batch_size=32,
        max_path_length=100,
        epoch_length=1000,
        min_pool_size=10000,
        n_epochs=1000,
        discount=0.99,
        scale_reward=0.01,
        qf_learning_rate=1e-3,
        policy_learning_rate=1e-4,
        # plot=True,  # enable together with plot=True in run_experiment_lite
    )
    agent.train()
# Launch run_task through rllab's experiment runner (serial sampling).
run_experiment_lite(
    run_task,
    # Number of parallel workers for sampling
    n_parallel=1,
    # Only keep the snapshot parameters for the last iteration
    snapshot_mode="last",
    # Specifies the seed for the experiment. If this is not provided, a random seed
    # will be used
    seed=1,
    # plot=True,
)
| 1,538 | 28.037736 | 93 | py |
rllab | rllab-master/examples/trpo_swimmer.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.mujoco.swimmer_env import SwimmerEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
# TRPO on the MuJoCo swimmer locomotion task.
env = normalize(SwimmerEnv())

# Gaussian MLP policy with two hidden layers of 32 units each.
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))

baseline = LinearFeatureBaseline(env_spec=env.spec)

algo = TRPO(
    env=env,
    policy=policy,
    baseline=baseline,
    discount=0.99,
    step_size=0.01,
    batch_size=4000,
    max_path_length=500,
    n_itr=40,
)
algo.train()
| 711 | 24.428571 | 89 | py |
rllab | rllab-master/examples/point_env.py | from rllab.envs.base import Env
from rllab.spaces import Box
from rllab.envs.base import Step
import numpy as np
class PointEnv(Env):
    """Toy 2-D point-mass environment: the agent starts at a random position
    in [-1, 1]^2 and is rewarded for moving toward the origin."""

    @property
    def observation_space(self):
        # Unbounded 2-D position.
        return Box(low=-np.inf, high=np.inf, shape=(2,))

    @property
    def action_space(self):
        # Small 2-D displacement per step.
        return Box(low=-0.1, high=0.1, shape=(2,))

    def reset(self):
        """Place the point uniformly at random in [-1, 1]^2."""
        self._state = np.random.uniform(-1, 1, size=(2,))
        return np.copy(self._state)

    def step(self, action):
        """Apply the displacement; reward is the negative Euclidean distance
        to the origin; done once within 0.01 of the origin on both axes."""
        self._state = self._state + action
        x, y = self._state
        dist = (x ** 2 + y ** 2) ** 0.5
        done = abs(x) < 0.01 and abs(y) < 0.01
        return Step(observation=np.copy(self._state), reward=-dist, done=done)

    def render(self):
        print('current state:', self._state)
| 866 | 26.967742 | 75 | py |
rllab | rllab-master/examples/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/examples/nop_cartpole.py | from rllab.algos.nop import NOP
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.uniform_control_policy import UniformControlPolicy
# Sanity-check baseline: a do-nothing "algorithm" (NOP) with a uniform
# random policy and a zero baseline on cart-pole.
env = normalize(CartpoleEnv())

# Policy that samples actions uniformly from the action space.
policy = UniformControlPolicy(
    env_spec=env.spec,
)

baseline = ZeroBaseline(env_spec=env.spec)

algo = NOP(
    env=env,
    policy=policy,
    baseline=baseline,
    discount=0.99,
    step_size=0.01,
    batch_size=4000,
    max_path_length=100,
    n_itr=40,
)
algo.train()
| 665 | 23.666667 | 89 | py |
rllab | rllab-master/examples/vpg_1.py |
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.envs.normalized_env import normalize
import numpy as np
import theano
import theano.tensor as TT
from lasagne.updates import adam
# Hand-rolled vanilla policy gradient (REINFORCE) without a baseline,
# written directly against Theano; no rllab algorithm class is used.
# normalize() makes sure that the actions for the environment lies
# within the range [-1, 1] (only works for environments with continuous actions)
env = normalize(CartpoleEnv())
# Initialize a neural network policy with a single hidden layer of 8 hidden units
policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,))
# We will collect 100 trajectories per iteration
N = 100
# Each trajectory will have at most 100 time steps
T = 100
# Number of iterations
n_itr = 100
# Set the discount factor for the problem
discount = 0.99
# Learning rate for the gradient update
learning_rate = 0.01
# Construct the computation graph
# Create a Theano variable for storing the observations
# We could have simply written `observations_var = TT.matrix('observations')` instead for this example. However,
# doing it in a slightly more abstract way allows us to delegate to the environment for handling the correct data
# type for the variable. For instance, for an environment with discrete observations, we might want to use integer
# types if the observations are represented as one-hot vectors.
observations_var = env.observation_space.new_tensor_variable(
    'observations',
    # It should have 1 extra dimension since we want to represent a list of observations
    extra_dims=1
)
actions_var = env.action_space.new_tensor_variable(
    'actions',
    extra_dims=1
)
returns_var = TT.vector('returns')
# policy.dist_info_sym returns a dictionary, whose values are symbolic expressions for quantities related to the
# distribution of the actions. For a Gaussian policy, it contains the mean and the logarithm of the standard deviation.
dist_info_vars = policy.dist_info_sym(observations_var)
# policy.distribution returns a distribution object under rllab.distributions. It contains many utilities for computing
# distribution-related quantities, given the computed dist_info_vars. Below we use dist.log_likelihood_sym to compute
# the symbolic log-likelihood. For this example, the corresponding distribution is an instance of the class
# rllab.distributions.DiagonalGaussian
dist = policy.distribution
# Note that we negate the objective, since most optimizers assume a minimization problem
surr = - TT.mean(dist.log_likelihood_sym(actions_var, dist_info_vars) * returns_var)
# Get the list of trainable parameters.
params = policy.get_params(trainable=True)
grads = theano.grad(surr, params)
f_train = theano.function(
    inputs=[observations_var, actions_var, returns_var],
    outputs=None,
    updates=adam(grads, params, learning_rate=learning_rate),
    allow_input_downcast=True
)
for _ in range(n_itr):
    paths = []
    for _ in range(N):
        observations = []
        actions = []
        rewards = []
        observation = env.reset()
        for _ in range(T):
            # policy.get_action() returns a pair of values. The second one returns a dictionary, whose values contains
            # sufficient statistics for the action distribution. It should at least contain entries that would be
            # returned by calling policy.dist_info(), which is the non-symbolic analog of policy.dist_info_sym().
            # Storing these statistics is useful, e.g., when forming importance sampling ratios. In our case it is
            # not needed.
            action, _ = policy.get_action(observation)
            # Recall that the last entry of the tuple stores diagnostic information about the environment. In our
            # case it is not needed.
            next_observation, reward, terminal, _ = env.step(action)
            observations.append(observation)
            actions.append(action)
            rewards.append(reward)
            observation = next_observation
            if terminal:
                # Finish rollout if terminal state reached
                break
        # We need to compute the empirical return for each time step along the
        # trajectory
        returns = []
        return_so_far = 0
        # Walk the trajectory backwards to accumulate discounted returns.
        for t in range(len(rewards) - 1, -1, -1):
            return_so_far = rewards[t] + discount * return_so_far
            returns.append(return_so_far)
        # The returns are stored backwards in time, so we need to revert it
        returns = returns[::-1]
        paths.append(dict(
            observations=np.array(observations),
            actions=np.array(actions),
            rewards=np.array(rewards),
            returns=np.array(returns)
        ))
    observations = np.concatenate([p["observations"] for p in paths])
    actions = np.concatenate([p["actions"] for p in paths])
    returns = np.concatenate([p["returns"] for p in paths])
    f_train(observations, actions, returns)
    print('Average Return:', np.mean([sum(p["rewards"]) for p in paths]))
| 5,002 | 40.347107 | 119 | py |
rllab | rllab-master/examples/trpo_gym_tf_cartpole.py | from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.policies.categorical_mlp_policy import CategoricalMLPPolicy
from sandbox.rocky.tf.algos.trpo import TRPO
# stub(globals()) wraps the objects below into serializable call specs; the
# actual construction/training happens inside run_experiment_lite (rllab
# instrument convention) -- so statement order here matters.
stub(globals())
# Need to wrap in a tf environment and force_reset to true
# see https://github.com/openai/rllab/issues/87#issuecomment-282519288
env = TfEnv(normalize(GymEnv("CartPole-v0", force_reset=True)))
policy = CategoricalMLPPolicy(
    name="policy",
    env_spec=env.spec,
    # The neural network policy should have two hidden layers, each with 32 hidden units.
    hidden_sizes=(32, 32)
)
# Linear feature baseline used as a variance-reduction baseline for TRPO.
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
    env=env,
    policy=policy,
    baseline=baseline,
    batch_size=4000,  # timesteps collected per iteration
    max_path_length=200,  # episode horizon
    n_itr=120,  # number of TRPO iterations
    discount=0.99,
    step_size=0.01,  # KL step size of the trust region
    # optimizer=ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))
)
run_experiment_lite(
    algo.train(),
    n_parallel=1,  # number of parallel sampling workers
    snapshot_mode="last",  # only keep the final iteration's snapshot
    seed=1
)
| 1,194 | 26.790698 | 91 | py |
rllab | rllab-master/sandbox/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/launchers/vpg_cartpole.py | from sandbox.rocky.tf.algos.vpg import VPG
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.misc.instrument import stub, run_experiment_lite
# Train vanilla policy gradient (VPG) on the Box2D cartpole task, using the
# TF implementations from sandbox.rocky.tf. Runs in-process (no stub()).
env = TfEnv(normalize(CartpoleEnv()))
policy = GaussianMLPPolicy(
    name="policy",
    env_spec=env.spec,
    # The neural network policy should have two hidden layers, each with 32 hidden units.
    hidden_sizes=(32, 32)
)
# Linear feature baseline for variance reduction.
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(
    env=env,
    policy=policy,
    baseline=baseline,
    batch_size=10000,  # timesteps per iteration
    max_path_length=100,  # episode horizon
    n_itr=40,
    discount=0.99,
    # Pass the learning rate through to the underlying TF optimizer.
    optimizer_args=dict(
        tf_optimizer_args=dict(
            learning_rate=0.01,
        )
    )
)
algo.train()
| 949 | 26.142857 | 89 | py |
rllab | rllab-master/sandbox/rocky/tf/launchers/trpo_cartpole_recurrent.py | from sandbox.rocky.tf.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.policies.gaussian_gru_policy import GaussianGRUPolicy
from sandbox.rocky.tf.policies.gaussian_lstm_policy import GaussianLSTMPolicy
from sandbox.rocky.tf.envs.base import TfEnv
import sandbox.rocky.tf.core.layers as L
from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer, FiniteDifferenceHvp
from rllab.misc.instrument import stub, run_experiment_lite
# Train TRPO with a recurrent (LSTM) Gaussian policy on the Box2D cartpole
# task. Uses a finite-difference Hessian-vector-product approximation for the
# conjugate gradient optimizer, which is required for recurrent policies here.
env = TfEnv(normalize(CartpoleEnv()))
policy = GaussianLSTMPolicy(
    name="policy",
    env_spec=env.spec,
    # Use TF's basic LSTM cell implementation for the recurrent layer.
    lstm_layer_cls=L.TfBasicLSTMLayer,
    # gru_layer_cls=L.GRULayer,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
    env=env,
    policy=policy,
    baseline=baseline,
    batch_size=4000,  # timesteps per iteration
    max_path_length=100,  # episode horizon
    n_itr=10,
    discount=0.99,
    step_size=0.01,  # KL step size of the trust region
    optimizer=ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))
)
algo.train()
| 1,148 | 31.828571 | 116 | py |
rllab | rllab-master/sandbox/rocky/tf/launchers/trpo_cartpole.py | from sandbox.rocky.tf.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer
from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import FiniteDifferenceHvp
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.misc.instrument import stub, run_experiment_lite
# Train TRPO with a feed-forward Gaussian policy on the Box2D cartpole task.
env = TfEnv(normalize(CartpoleEnv()))
policy = GaussianMLPPolicy(
    name="policy",
    env_spec=env.spec,
    # The neural network policy should have two hidden layers, each with 32 hidden units.
    hidden_sizes=(32, 32)
)
# Linear feature baseline for variance reduction.
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
    env=env,
    policy=policy,
    baseline=baseline,
    batch_size=4000,  # timesteps per iteration
    max_path_length=100,  # episode horizon
    n_itr=40,
    discount=0.99,
    step_size=0.01,  # KL step size of the trust region
    # optimizer=ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))
)
algo.train()
| 1,144 | 31.714286 | 95 | py |
rllab | rllab-master/sandbox/rocky/tf/launchers/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/core/network.py | import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
import numpy as np
import itertools
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.core.parameterized import Parameterized
from sandbox.rocky.tf.core.layers_powered import LayersPowered
class MLP(LayersPowered, Serializable):
    """Fully connected feed-forward network built from `layers` primitives.

    Builds a stack of DenseLayers under a TF variable scope named `name`,
    optionally interleaving batch normalization and applying weight
    normalization to each dense layer. Exposes the input/output layers and
    the symbolic output tensor via properties.
    """

    def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity,
                 output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(),
                 input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False,
                 ):
        # Record constructor args so the object can be re-instantiated when
        # deserialized (rllab Serializable convention).
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            # Either reuse a caller-supplied input layer or create a fresh
            # placeholder-backed one; input_shape is required in the latter case.
            if input_layer is None:
                l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(
                    l_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)
            l_out = L.DenseLayer(
                l_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization
            )
            if batch_normalization:
                l_out = L.batch_norm(l_out)
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var
            # Eagerly build the symbolic output expression for convenience.
            self._output = L.get_output(l_out)
            LayersPowered.__init__(self, l_out)

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def input_var(self):
        # Placeholder (or caller-supplied tensor) feeding the network.
        return self._l_in.input_var

    @property
    def layers(self):
        return self._layers

    @property
    def output(self):
        # Symbolic output tensor built at construction time.
        return self._output
class ConvNetwork(LayersPowered, Serializable):
    """Convolutional network: conv stack followed by fully connected layers.

    Inputs are assumed to be FLATTENED vectors; the network reshapes them back
    to (width, height, channel) before the convolutions (see docstring in
    __init__).
    """

    def __init__(self, name, input_shape, output_dim,
                 conv_filters, conv_filter_sizes, conv_strides, conv_pads,
                 hidden_sizes, hidden_nonlinearity, output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(),
                 input_var=None, input_layer=None, batch_normalization=False, weight_normalization=False):
        Serializable.quick_init(self, locals())
        """
        A network composed of several convolution layers followed by some fc layers.
        input_shape: (width,height,channel)
            HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.
        conv_filters: a list of numbers of convolution kernel
        conv_filter_sizes: a list of sizes (int) of the convolution kernels
        conv_strides: a list of strides (int) of the conv kernels
        conv_pads: a list of pad formats (either 'SAME' or 'VALID')
        hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers
        hidden_sizes: a list of numbers of hidden units for all fc layers
        """
        with tf.variable_scope(name):
            if input_layer is not None:
                l_in = input_layer
                l_hid = l_in
            elif len(input_shape) == 3:
                # Flat input placeholder, reshaped back to (w, h, c).
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name="input")
                l_hid = L.reshape(l_in, ([0],) + input_shape, name="reshape_input")
            elif len(input_shape) == 2:
                # 2-D input: add a singleton leading dimension before reshaping.
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name="input")
                input_shape = (1,) + input_shape
                l_hid = L.reshape(l_in, ([0],) + input_shape, name="reshape_input")
            else:
                l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var, name="input")
                l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_hid = L.Conv2DLayer(
                    l_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="conv_hidden_%d" % idx,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
            if output_nonlinearity == L.spatial_expected_softmax:
                # Spatial softmax head: replaces the fc layers entirely and
                # outputs (x, y) expectations per filter, hence 2 * n_filters.
                assert len(hidden_sizes) == 0
                assert output_dim == conv_filters[-1] * 2
                l_hid.nonlinearity = tf.identity
                l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
            else:
                l_hid = L.flatten(l_hid, name="conv_flatten")
                for idx, hidden_size in enumerate(hidden_sizes):
                    l_hid = L.DenseLayer(
                        l_hid,
                        num_units=hidden_size,
                        nonlinearity=hidden_nonlinearity,
                        name="hidden_%d" % idx,
                        W=hidden_W_init,
                        b=hidden_b_init,
                        weight_normalization=weight_normalization,
                    )
                    if batch_normalization:
                        l_hid = L.batch_norm(l_hid)
                l_out = L.DenseLayer(
                    l_hid,
                    num_units=output_dim,
                    nonlinearity=output_nonlinearity,
                    name="output",
                    W=output_W_init,
                    b=output_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_out = L.batch_norm(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var

        LayersPowered.__init__(self, l_out)

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def input_var(self):
        return self._l_in.input_var
class GRUNetwork(object):
    """GRU recurrent network with a shared output head.

    Builds two views over the same weights:
    - a full-sequence view (input shape (batch, time) + input_shape) used for
      training, and
    - a single-step view (step_input/step_prev_state -> step_output) used for
      sampling, whose output DenseLayer reuses the W/b of the sequence head.
    """

    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu,
                 gru_layer_cls=L.GRULayer,
                 output_nonlinearity=None, input_var=None, input_layer=None, layer_args=None):
        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            # Single-timestep inputs for the sampling path.
            l_step_input = L.InputLayer(shape=(None,) + input_shape, name="step_input")
            l_step_prev_state = L.InputLayer(shape=(None, hidden_dim), name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_gru = gru_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                                  hidden_init_trainable=False, name="gru", **layer_args)
            # Flatten (batch, time, hidden) -> (batch*time, hidden) so a plain
            # DenseLayer can produce the per-step outputs.
            l_gru_flat = L.ReshapeLayer(
                l_gru, shape=(-1, hidden_dim),
                name="gru_flat"
            )
            l_output_flat = L.DenseLayer(
                l_gru_flat,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output_flat"
            )
            # Un-flatten back to (batch, time, output_dim).
            l_output = L.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input:
                tf.reshape(flat_output, tf.stack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output"
            )
            l_step_state = l_gru.get_step_layer(l_step_input, l_step_prev_state, name="step_state")
            # For a GRU the full state IS the hidden vector.
            l_step_hidden = l_step_state
            # Step output shares weights with the sequence output head.
            l_step_output = L.DenseLayer(
                l_step_hidden,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                W=l_output_flat.W,
                b=l_output_flat.b,
                name="step_output"
            )
            self._l_in = l_in
            self._hid_init_param = l_gru.h0
            self._l_gru = l_gru
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim

    @property
    def state_dim(self):
        return self._hidden_dim

    @property
    def hidden_dim(self):
        return self._hidden_dim

    @property
    def input_layer(self):
        return self._l_in

    @property
    def input_var(self):
        return self._l_in.input_var

    @property
    def output_layer(self):
        return self._l_out

    @property
    def recurrent_layer(self):
        return self._l_gru

    @property
    def step_input_layer(self):
        return self._l_step_input

    @property
    def step_prev_state_layer(self):
        return self._l_step_prev_state

    @property
    def step_hidden_layer(self):
        return self._l_step_hidden

    @property
    def step_state_layer(self):
        return self._l_step_state

    @property
    def step_output_layer(self):
        return self._l_step_output

    @property
    def hid_init_param(self):
        return self._hid_init_param

    @property
    def state_init_param(self):
        return self._hid_init_param
class LSTMNetwork(object):
    """LSTM recurrent network with a shared output head.

    Mirrors GRUNetwork but the recurrent state is the concatenation of the
    hidden and the cell vector, so state_dim == 2 * hidden_dim and the step
    state is sliced into hidden/cell halves.
    """

    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu,
                 lstm_layer_cls=L.LSTMLayer,
                 output_nonlinearity=None, input_var=None, input_layer=None, forget_bias=1.0, use_peepholes=False,
                 layer_args=None):
        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            l_step_input = L.InputLayer(shape=(None,) + input_shape, name="step_input")
            # contains previous hidden and cell state
            l_step_prev_state = L.InputLayer(shape=(None, hidden_dim * 2), name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_lstm = lstm_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                                    hidden_init_trainable=False, name="lstm", forget_bias=forget_bias,
                                    cell_init_trainable=False, use_peepholes=use_peepholes, **layer_args)
            # Flatten (batch, time, hidden) -> (batch*time, hidden) for the
            # dense output head.
            l_lstm_flat = L.ReshapeLayer(
                l_lstm, shape=(-1, hidden_dim),
                name="lstm_flat"
            )
            l_output_flat = L.DenseLayer(
                l_lstm_flat,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output_flat"
            )
            # Un-flatten back to (batch, time, output_dim).
            l_output = L.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input:
                tf.reshape(flat_output, tf.stack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output"
            )
            l_step_state = l_lstm.get_step_layer(l_step_input, l_step_prev_state, name="step_state")
            # Step state is [hidden; cell]; split it into its two halves.
            l_step_hidden = L.SliceLayer(l_step_state, indices=slice(hidden_dim), name="step_hidden")
            l_step_cell = L.SliceLayer(l_step_state, indices=slice(hidden_dim, None), name="step_cell")
            # Step output shares weights with the sequence output head.
            l_step_output = L.DenseLayer(
                l_step_hidden,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                W=l_output_flat.W,
                b=l_output_flat.b,
                name="step_output"
            )
            self._l_in = l_in
            self._hid_init_param = l_lstm.h0
            self._cell_init_param = l_lstm.c0
            self._l_lstm = l_lstm
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_cell = l_step_cell
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim

    @property
    def state_dim(self):
        # Hidden plus cell state.
        return self._hidden_dim * 2

    @property
    def input_layer(self):
        return self._l_in

    @property
    def input_var(self):
        return self._l_in.input_var

    @property
    def output_layer(self):
        return self._l_out

    @property
    def recurrent_layer(self):
        return self._l_lstm

    @property
    def step_input_layer(self):
        return self._l_step_input

    @property
    def step_prev_state_layer(self):
        return self._l_step_prev_state

    @property
    def step_hidden_layer(self):
        return self._l_step_hidden

    @property
    def step_state_layer(self):
        return self._l_step_state

    @property
    def step_cell_layer(self):
        return self._l_step_cell

    @property
    def step_output_layer(self):
        return self._l_step_output

    @property
    def hid_init_param(self):
        return self._hid_init_param

    @property
    def cell_init_param(self):
        return self._cell_init_param

    @property
    def state_init_param(self):
        # Initial state is the concatenation [h0; c0].
        return tf.concat(axis=0, values=[self._hid_init_param, self._cell_init_param])
class ConvMergeNetwork(LayersPowered, Serializable):
    """
    This network allows the input to consist of a convolution-friendly component, plus a non-convolution-friendly
    component. These two components will be concatenated in the fully connected layers. There can also be a list of
    optional layers for the non-convolution-friendly component alone.

    The input to the network should be a matrix where each row is a single input entry, with both the aforementioned
    components flattened out and then concatenated together
    """

    def __init__(self, name, input_shape, extra_input_shape, output_dim, hidden_sizes,
                 conv_filters, conv_filter_sizes, conv_strides, conv_pads,
                 extra_hidden_sizes=None,
                 hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(),
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 input_var=None, input_layer=None):
        Serializable.quick_init(self, locals())

        if extra_hidden_sizes is None:
            extra_hidden_sizes = []

        with tf.variable_scope(name):
            # The single flat input row holds the conv part first, then the
            # extra part; split it by flat length.
            input_flat_dim = np.prod(input_shape)
            extra_input_flat_dim = np.prod(extra_input_shape)
            total_input_flat_dim = input_flat_dim + extra_input_flat_dim

            if input_layer is None:
                l_in = L.InputLayer(shape=(None, total_input_flat_dim), input_var=input_var, name="input")
            else:
                l_in = input_layer

            # First input_flat_dim entries -> conv branch, reshaped to input_shape.
            l_conv_in = L.reshape(
                L.SliceLayer(
                    l_in,
                    indices=slice(input_flat_dim),
                    name="conv_slice"
                ),
                ([0],) + input_shape,
                name="conv_reshaped"
            )
            # Remaining entries -> extra (non-conv) branch.
            l_extra_in = L.reshape(
                L.SliceLayer(
                    l_in,
                    indices=slice(input_flat_dim, None),
                    name="extra_slice"
                ),
                ([0],) + extra_input_shape,
                name="extra_reshaped"
            )

            l_conv_hid = l_conv_in
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_conv_hid = L.Conv2DLayer(
                    l_conv_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="conv_hidden_%d" % idx,
                )

            l_extra_hid = l_extra_in
            for idx, hidden_size in enumerate(extra_hidden_sizes):
                l_extra_hid = L.DenseLayer(
                    l_extra_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="extra_hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                )

            # Merge the two branches and run the joint fc stack.
            l_joint_hid = L.concat(
                [L.flatten(l_conv_hid, name="conv_hidden_flat"), l_extra_hid],
                name="joint_hidden"
            )

            for idx, hidden_size in enumerate(hidden_sizes):
                l_joint_hid = L.DenseLayer(
                    l_joint_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="joint_hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                )
            l_out = L.DenseLayer(
                l_joint_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
            )
            self._l_in = l_in
            self._l_out = l_out

            LayersPowered.__init__(self, [l_out], input_layers=[l_in])

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def input_var(self):
        return self._l_in.input_var
| 19,707 | 35.837383 | 155 | py |
rllab | rllab-master/sandbox/rocky/tf/core/layers.py | # -*- coding: utf-8 -*-
import functools
import numpy as np
import math
import tensorflow as tf
from tensorflow.python.training import moving_averages
from collections import OrderedDict
from collections import deque
from itertools import chain
from inspect import getargspec
from difflib import get_close_matches
from warnings import warn
# Module-level mutable namespace; G._n_layers is a global counter that
# Layer.__init__ uses to generate unique default layer names.
class G(object):
    pass

G._n_layers = 0
def create_param(spec, shape, name, trainable=True, regularizable=True):
    """Materialize a TF parameter from `spec`.

    `spec` may be an already-built ``tf.Tensor``/``tf.Variable`` (returned
    unchanged) or a callable initializer, in which case a float32 variable of
    the given `shape` is created via ``tf.get_variable``.

    Parameters marked non-regularizable get a regularizer that always returns
    a zero constant, so the default (global) regularizer never applies to them.
    """
    if not callable(spec):
        # Ready-made tensor or variable: pass it through untouched.
        assert isinstance(spec, (tf.Tensor, tf.Variable))
        return spec
    regularizer = None if regularizable else (lambda _: tf.constant(0.))
    return tf.get_variable(
        name=name,
        shape=shape,
        initializer=spec,
        dtype=tf.float32,
        trainable=trainable,
        regularizer=regularizer,
    )
def as_tuple(x, N, t=None):
    """Coerce ``x`` into a tuple of exactly ``N`` elements.

    A non-iterable ``x`` is replicated ``N`` times; an iterable is converted
    element-wise. When ``t`` is given, every element must be an instance of
    ``t``.

    Raises
    ------
    TypeError
        If ``t`` is given and some element is not an instance of ``t``.
    ValueError
        If the resulting tuple does not have length ``N``.
    """
    try:
        result = tuple(x)
    except TypeError:
        # Scalar value: repeat it N times.
        result = (x,) * N
    if t is not None:
        if any(not isinstance(item, t) for item in result):
            raise TypeError("expected a single value or an iterable "
                            "of {0}, got {1} instead".format(t.__name__, x))
    if len(result) != N:
        raise ValueError("expected a single value or an iterable "
                         "with length {0}, got {1} instead".format(N, x))
    return result
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Output size along one axis of a convolution.

    Parameters
    ----------
    input_length : int or None
        Size of the input along this axis; ``None`` propagates to ``None``.
    filter_size : int
        Size of the filter along this axis.
    stride : int
        Stride of the convolution along this axis.
    pad : int, 'valid', 'full' or 'same' (default: 0)
        Implicit zero padding: an int pads symmetrically by that amount;
        ``'valid'`` means no padding, ``'full'`` pads by ``filter_size - 1``
        on both sides, and ``'same'`` keeps the output size equal to the
        input size when ``stride == 1``.

    Returns
    -------
    int or None
        The output length, or ``None`` when ``input_length`` is ``None``.

    Raises
    ------
    ValueError
        For an unrecognized ``pad`` value.
    """
    if input_length is None:
        return None
    if pad == 'valid':
        length = input_length - filter_size + 1
    elif pad == 'full':
        length = input_length + filter_size - 1
    elif pad == 'same':
        length = input_length
    elif isinstance(pad, int):
        length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))
    # Ceiling division by the stride, done in integer arithmetic.
    return -(-length // stride)
class Layer(object):
    """Base class for all layers (a port of Lasagne-style layers to TF).

    A layer is constructed from either an incoming Layer instance or a raw
    shape tuple, tracks its parameters in an OrderedDict keyed by variable,
    and defines its computation via get_output_for / get_output_shape_for.
    """

    def __init__(self, incoming, name=None, variable_reuse=None, weight_normalization=False, **kwargs):
        # `incoming` is either a shape tuple (no parent layer) or a Layer.
        if isinstance(incoming, tuple):
            self.input_shape = incoming
            self.input_layer = None
        else:
            self.input_shape = incoming.output_shape
            self.input_layer = incoming
        self.params = OrderedDict()
        self.weight_normalization = weight_normalization
        if name is None:
            # Generate a unique default name using the global layer counter.
            name = "%s_%d" % (type(self).__name__, G._n_layers)
            G._n_layers += 1
        self.name = name
        self.variable_reuse = variable_reuse
        self.get_output_kwargs = []
        # None dimensions are variable-size; everything else must be positive.
        if any(d is not None and d <= 0 for d in self.input_shape):
            raise ValueError((
                "Cannot create Layer with a non-positive input_shape "
                "dimension. input_shape=%r, self.name=%r") % (
                self.input_shape, self.name))

    @property
    def output_shape(self):
        """Concrete output shape; rejects symbolic (tensor-valued) dims."""
        shape = self.get_output_shape_for(self.input_shape)
        if any(isinstance(s, (tf.Variable, tf.Tensor)) for s in shape):
            raise ValueError("%s returned a symbolic output shape from its "
                             "get_output_shape_for() method: %r. This is not "
                             "allowed; shapes must be tuples of integers for "
                             "fixed-size dimensions and Nones for variable "
                             "dimensions." % (self.__class__.__name__, shape))
        return shape

    def get_output_shape_for(self, input_shape):
        # Subclass responsibility: map input shape -> output shape.
        raise NotImplementedError

    def get_output_for(self, input, **kwargs):
        # Subclass responsibility: map input tensor -> output tensor.
        raise NotImplementedError

    def add_param_plain(self, spec, shape, name, **tags):
        """Create/register a raw parameter under this layer's variable scope."""
        with tf.variable_scope(self.name, reuse=self.variable_reuse):
            tags['trainable'] = tags.get('trainable', True)
            tags['regularizable'] = tags.get('regularizable', True)
            param = create_param(spec, shape, name, **tags)
            # Remember only the tags that are set (True) for this parameter.
            self.params[param] = set(tag for tag, value in list(tags.items()) if value)
            return param

    def add_param(self, spec, shape, name, **kwargs):
        """Like add_param_plain, optionally wrapping weights in weight norm."""
        param = self.add_param_plain(spec, shape, name, **kwargs)
        if name is not None and name.startswith("W") and self.weight_normalization:
            # Hacky: check if the parameter is a weight matrix. If so, apply weight normalization
            if len(param.get_shape()) == 2:
                # Dense weight: per-output-unit gain g over the column norms.
                v = param
                g = self.add_param_plain(tf.ones_initializer(), (shape[1],), name=name + "_wn/g")
                param = v * (tf.reshape(g, (1, -1)) / tf.sqrt(tf.reduce_sum(tf.square(v), 0, keep_dims=True)))
            elif len(param.get_shape()) == 4:
                # Conv weight: per-filter gain over the (h, w, in) norms.
                v = param
                g = self.add_param_plain(tf.ones_initializer(), (shape[3],), name=name + "_wn/g")
                param = v * (tf.reshape(g, (1, 1, 1, -1)) / tf.sqrt(tf.reduce_sum(tf.square(v), [0, 1, 2],
                                                                                 keep_dims=True)))
            else:
                raise NotImplementedError
        return param

    def get_params(self, **tags):
        """Return registered parameters filtered by tag (e.g. trainable=True)."""
        result = list(self.params.keys())
        only = set(tag for tag, value in list(tags.items()) if value)
        if only:
            # retain all parameters that have all of the tags in `only`
            result = [param for param in result
                      if not (only - self.params[param])]
        exclude = set(tag for tag, value in list(tags.items()) if not value)
        if exclude:
            # retain all parameters that have none of the tags in `exclude`
            result = [param for param in result
                      if not (self.params[param] & exclude)]
        return result
class InputLayer(Layer):
    """Entry point of a network: wraps a placeholder (or a given tensor).

    If no input_var is supplied, a float32 placeholder with the given shape
    is created, scoped under the layer name when one exists.
    """

    def __init__(self, shape, input_var=None, **kwargs):
        super(InputLayer, self).__init__(shape, **kwargs)
        self.shape = shape
        if input_var is None:
            if self.name is not None:
                with tf.variable_scope(self.name):
                    input_var = tf.placeholder(tf.float32, shape=shape, name="input")
            else:
                input_var = tf.placeholder(tf.float32, shape=shape, name="input")
        self.input_var = input_var

    @Layer.output_shape.getter
    def output_shape(self):
        # An input layer's output shape is simply its declared shape.
        return self.shape
class MergeLayer(Layer):
    """Base class for layers with multiple incoming layers.

    Note: this deliberately does NOT call Layer.__init__; it keeps parallel
    lists of input shapes/layers instead of a single input, and its
    get_output_*_for methods take lists.
    """

    def __init__(self, incomings, name=None, **kwargs):
        # Each incoming may be a Layer or a plain shape tuple.
        self.input_shapes = [incoming if isinstance(incoming, tuple)
                             else incoming.output_shape
                             for incoming in incomings]
        self.input_layers = [None if isinstance(incoming, tuple)
                             else incoming
                             for incoming in incomings]
        self.name = name
        self.params = OrderedDict()
        self.get_output_kwargs = []

    @Layer.output_shape.getter
    def output_shape(self):
        """Concrete output shape; rejects symbolic (tensor-valued) dims."""
        shape = self.get_output_shape_for(self.input_shapes)
        if any(isinstance(s, (tf.Variable, tf.Tensor)) for s in shape):
            raise ValueError("%s returned a symbolic output shape from its "
                             "get_output_shape_for() method: %r. This is not "
                             "allowed; shapes must be tuples of integers for "
                             "fixed-size dimensions and Nones for variable "
                             "dimensions." % (self.__class__.__name__, shape))
        return shape

    def get_output_shape_for(self, input_shapes):
        # Subclass responsibility: map list of input shapes -> output shape.
        raise NotImplementedError

    def get_output_for(self, inputs, **kwargs):
        # Subclass responsibility: map list of input tensors -> output tensor.
        raise NotImplementedError
class ConcatLayer(MergeLayer):
    """
    Concatenates multiple inputs along the specified axis. Inputs should have
    the same shape except for the dimension specified in axis, which can have
    different sizes.

    Parameters
    -----------
    incomings : a list of :class:`Layer` instances or tuples
        The layers feeding into this layer, or expected input shapes
    axis : int
        Axis which inputs are joined over
    """

    def __init__(self, incomings, axis=1, **kwargs):
        super(ConcatLayer, self).__init__(incomings, **kwargs)
        self.axis = axis

    def get_output_shape_for(self, input_shapes):
        # Infer the output shape by grabbing, for each axis, the first
        # input size that is not `None` (if there is any)
        output_shape = [next((s for s in sizes if s is not None), None)
                        for sizes in zip(*input_shapes)]

        def match(shape1, shape2):
            # Shapes are compatible when they agree on every axis except the
            # concatenation axis (None matches anything).
            return (len(shape1) == len(shape2) and
                    all(i == self.axis or s1 is None or s2 is None or s1 == s2
                        for i, (s1, s2) in enumerate(zip(shape1, shape2))))

        # Check for compatibility with inferred output shape
        if not all(match(shape, output_shape) for shape in input_shapes):
            raise ValueError("Mismatch: input shapes must be the same except "
                             "in the concatenation axis")
        # Infer output shape on concatenation axis and return
        sizes = [input_shape[self.axis] for input_shape in input_shapes]
        concat_size = None if any(s is None for s in sizes) else sum(sizes)
        output_shape[self.axis] = concat_size
        return tuple(output_shape)

    def get_output_for(self, inputs, **kwargs):
        dtypes = [x.dtype.as_numpy_dtype for x in inputs]
        if len(set(dtypes)) > 1:
            # need to convert to common data type
            common_dtype = np.core.numerictypes.find_common_type([], dtypes)
            inputs = [tf.cast(x, common_dtype) for x in inputs]
        return tf.concat(axis=self.axis, values=inputs)
concat = ConcatLayer # shortcut
class XavierUniformInitializer(object):
    """Glorot/Xavier uniform init: U(-r, r) with r = sqrt(6 / (fan_in + fan_out)).

    For 2-D shapes, fan-in/fan-out are the matrix dims; for conv shapes the
    first two dims are treated as the receptive field.
    """

    def __call__(self, shape, dtype=tf.float32, *args, **kwargs):
        if len(shape) == 2:
            n_inputs, n_outputs = shape
        else:
            # Conv kernel: scale fans by the receptive field size.
            receptive_field_size = np.prod(shape[:2])
            n_inputs = shape[-2] * receptive_field_size
            n_outputs = shape[-1] * receptive_field_size
        init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range, dtype=dtype)(shape)
class HeUniformInitializer(object):
    """Fan-in-scaled uniform init: U(-r, r) with r = sqrt(1 / fan_in)."""

    def __call__(self, shape, dtype=tf.float32, *args, **kwargs):
        if len(shape) == 2:
            n_inputs, _ = shape
        else:
            # Conv kernel: fan-in includes the receptive field size.
            receptive_field_size = np.prod(shape[:2])
            n_inputs = shape[-2] * receptive_field_size
        init_range = math.sqrt(1.0 / n_inputs)
        return tf.random_uniform_initializer(-init_range, init_range, dtype=dtype)(shape)
def py_ortho_init(scale):
    """Return a numpy initializer producing a scaled (semi-)orthogonal matrix.

    The returned callable maps a 2-D ``shape`` to a float32 array ``q`` such
    that the rows (or columns, for tall shapes) are orthonormal scaled by
    ``scale``. Used by :class:`OrthogonalInitializer` via ``tf.py_func``.

    Fix/generalization: the previous implementation took ``u`` from a
    full-matrices SVD, whose shape only matches ``shape`` when it is square.
    Using ``full_matrices=False`` and picking whichever factor matches the
    requested shape keeps the square-shape behavior identical while also
    supporting rectangular shapes. Also replaces the deprecated
    ``np.cast['float32']`` with ``astype``.
    """
    def _init(shape):
        a = np.random.uniform(size=shape)
        # Reduced SVD: u is (m, k), vh is (k, n) with k = min(m, n), so one
        # of them always has the requested shape.
        u, _, vh = np.linalg.svd(a, full_matrices=False)
        q = u if u.shape == tuple(shape) else vh
        return (q * scale).astype('float32')

    return _init
class OrthogonalInitializer(object):
    """Orthogonal weight initializer (computed in numpy via tf.py_func)."""

    def __init__(self, scale=1.1):
        # Gain applied to the orthogonal matrix.
        self.scale = scale

    def __call__(self, shape, dtype=tf.float32, *args, **kwargs):
        # Delegate the SVD-based construction to py_ortho_init on the numpy
        # side; py_func loses static shape info, so restore it explicitly.
        result, = tf.py_func(py_ortho_init(self.scale), [shape], [tf.float32])
        result.set_shape(shape)
        return result
class ParamLayer(Layer):
    """Layer whose output is a learned parameter vector, broadcast over the batch.

    Ignores the incoming values except for their leading (batch-like) dims,
    which the parameter is tiled across. Used e.g. for state-independent
    action std-devs.
    """

    def __init__(self, incoming, num_units, param=tf.zeros_initializer(),
                 trainable=True, **kwargs):
        super(ParamLayer, self).__init__(incoming, **kwargs)
        self.num_units = num_units
        self.param = self.add_param(
            param,
            (num_units,),
            name="param",
            trainable=trainable
        )

    def get_output_shape_for(self, input_shape):
        # Same leading dims as the input, last dim replaced by num_units.
        return input_shape[:-1] + (self.num_units,)

    def get_output_for(self, input, **kwargs):
        ndim = input.get_shape().ndims
        # Reshape the parameter to rank `ndim` and tile it over all leading
        # dimensions of the input (the last dim stays num_units).
        reshaped_param = tf.reshape(self.param, (1,) * (ndim - 1) + (self.num_units,))
        tile_arg = tf.concat(axis=0, values=[tf.shape(input)[:ndim - 1], [1]])
        tiled = tf.tile(reshaped_param, tile_arg)
        return tiled
class OpLayer(MergeLayer):
    """Layer that applies an arbitrary user-supplied op to its inputs.

    ``op`` receives the incoming tensor followed by any ``extras`` tensors;
    ``shape_op`` performs the corresponding shape inference on their shapes.
    """

    def __init__(self, incoming, op,
                 shape_op=lambda x: x, extras=None, **kwargs):
        if extras is None:
            extras = []
        merged_incomings = [incoming] + extras
        super(OpLayer, self).__init__(merged_incomings, **kwargs)
        self.op = op
        self.shape_op = shape_op
        self.incomings = merged_incomings

    def get_output_shape_for(self, input_shapes):
        # Shape inference is fully delegated to the caller-provided shape_op.
        return self.shape_op(*input_shapes)

    def get_output_for(self, inputs, **kwargs):
        return self.op(*inputs)
class DenseLayer(Layer):
    """Fully connected layer: nonlinearity(input @ W + b).

    Inputs with more than two dims are flattened to (batch, -1) first.
    Pass ``b=None`` to omit the bias.
    """

    def __init__(self, incoming, num_units, nonlinearity=None, W=XavierUniformInitializer(), b=tf.zeros_initializer(),
                 **kwargs):
        super(DenseLayer, self).__init__(incoming, **kwargs)
        # None means linear output.
        self.nonlinearity = tf.identity if nonlinearity is None else nonlinearity

        self.num_units = num_units

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b", regularizable=False)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], self.num_units)

    def get_output_for(self, input, **kwargs):
        if input.get_shape().ndims > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = tf.reshape(input, tf.stack([tf.shape(input)[0], -1]))
        activation = tf.matmul(input, self.W)
        if self.b is not None:
            activation = activation + tf.expand_dims(self.b, 0)
        return self.nonlinearity(activation)
class BaseConvLayer(Layer):
    """Shared machinery for N-D convolution layers.

    Handles parameter creation (W, optional b), padding/shape bookkeeping and
    bias + nonlinearity application; subclasses implement :meth:`convolve`.

    Fix: ``get_output_shape_for`` previously dropped into an ``ipdb`` debugger
    session on an unsupported pad mode before raising a bare
    NotImplementedError; the debugger leftover is removed and the error now
    carries a message.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=1, pad="VALID",
                 untie_biases=False,
                 W=XavierUniformInitializer(), b=tf.zeros_initializer(),
                 nonlinearity=tf.nn.relu, n=None, **kwargs):
        """
        Input is assumed to be of shape batch*height*width*channels

        Parameters: `num_filters` output channels; `filter_size`/`stride`
        scalars or per-dim tuples (coerced via as_tuple); `pad` is 'SAME' or
        'VALID'; `untie_biases` gives each spatial position its own bias;
        `n` is the number of spatial dims (inferred from the input shape when
        None). `b=None` omits the bias; `nonlinearity=None` means linear.
        """
        super(BaseConvLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = tf.identity
        else:
            self.nonlinearity = nonlinearity

        if n is None:
            n = len(self.input_shape) - 2
        elif n != len(self.input_shape) - 2:
            raise ValueError("Tried to create a %dD convolution layer with "
                             "input shape %r. Expected %d input dimensions "
                             "(batchsize, channels, %d spatial dimensions)." %
                             (n, self.input_shape, n + 2, n))
        self.n = n
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, n, int)
        self.stride = as_tuple(stride, n, int)
        self.untie_biases = untie_biases

        self.pad = pad
        if pad == 'SAME':
            # With even filter sizes the implicit padding would be asymmetric.
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                # One bias per spatial position and filter.
                biases_shape = self.output_shape[1:3] + (num_filters,)  # + self.output_shape[2:]
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)

    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.

        Returns
        -------
        tuple of int
            The shape of the weight matrix (filter dims, in_channels, num_filters).
        """
        num_input_channels = self.input_shape[-1]
        return self.filter_size + (num_input_channels, self.num_filters)

    def get_output_shape_for(self, input_shape):
        """Output shape given `input_shape`, honoring pad mode and strides."""
        if self.pad == 'SAME':
            pad = ('same',) * self.n
        elif self.pad == 'VALID':
            pad = (0,) * self.n
        else:
            # Previously: `import ipdb; ipdb.set_trace()` followed by a bare
            # NotImplementedError -- fail loudly and informatively instead.
            raise NotImplementedError(
                "Unsupported pad mode: %r (expected 'SAME' or 'VALID')"
                % (self.pad,))
        # pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * self.n
        batchsize = input_shape[0]
        # NOTE(review): only input_shape[1:3] is zipped here, which assumes
        # two spatial dimensions even though n is configurable -- confirm if
        # n != 2 is ever used.
        return ((batchsize,) +
                tuple(conv_output_length(input, filter, stride, p)
                      for input, filter, stride, p
                      in zip(input_shape[1:3], self.filter_size,
                             self.stride, pad))) + (self.num_filters,)

    def get_output_for(self, input, **kwargs):
        conved = self.convolve(input, **kwargs)
        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # raise NotImplementedError
            activation = conved + tf.expand_dims(self.b, 0)
        else:
            # Broadcast the per-filter bias over batch and spatial dims.
            activation = conved + tf.reshape(self.b, (1, 1, 1, self.num_filters))
        return self.nonlinearity(activation)

    def convolve(self, input, **kwargs):
        """
        Symbolically convolves `input` with ``self.W``, producing an output of
        shape ``self.output_shape``. To be implemented by subclasses.

        Parameters
        ----------
        input : Theano tensor
            The input minibatch to convolve
        **kwargs
            Any additional keyword arguments from :meth:`get_output_for`

        Returns
        -------
        Theano tensor
            `input` convolved according to the configuration of this layer,
            without any bias or nonlinearity applied.
        """
        raise NotImplementedError("BaseConvLayer does not implement the "
                                  "convolve() method. You will want to "
                                  "use a subclass such as Conv2DLayer.")
class Conv2DLayer(BaseConvLayer):
    """2D convolution layer; delegates shape logic to :class:`BaseConvLayer`."""
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad="VALID", untie_biases=False,
                 W=XavierUniformInitializer(), b=tf.zeros_initializer(),
                 nonlinearity=tf.nn.relu,
                 convolution=tf.nn.conv2d, **kwargs):
        super(Conv2DLayer, self).__init__(incoming=incoming, num_filters=num_filters, filter_size=filter_size,
                                          stride=stride, pad=pad, untie_biases=untie_biases, W=W, b=b,
                                          nonlinearity=nonlinearity, n=2, **kwargs)
        # The convolution op is injectable (e.g. for dilated variants).
        self.convolution = convolution
    def convolve(self, input, **kwargs):
        # Batch and channel strides are always 1 for a spatial convolution.
        strides = (1,) + self.stride + (1,)
        return self.convolution(input, self.W, strides=strides, padding=self.pad)
def pool_output_length(input_length, pool_size, stride, pad):
    """Compute the output length of a 1D pooling op along one axis.

    Returns None when the input length or pool size is unknown. With "SAME"
    padding the spatial extent only shrinks by the stride; otherwise the
    window must fit entirely inside the input ("VALID" semantics).
    """
    if input_length is None or pool_size is None:
        return None
    if pad == "SAME":
        numerator = float(input_length)
    else:
        numerator = float(input_length - pool_size + 1)
    return int(np.ceil(numerator / float(stride)))
class Pool2DLayer(Layer):
    """2D spatial pooling over inputs shaped (batch, height, width, channels)."""
    def __init__(self, incoming, pool_size, stride=None, pad="VALID", mode='max', **kwargs):
        super(Pool2DLayer, self).__init__(incoming, **kwargs)
        self.pool_size = as_tuple(pool_size, 2)
        if len(self.input_shape) != 4:
            raise ValueError("Tried to create a 2D pooling layer with "
                             "input shape %r. Expected 4 input dimensions "
                             "(batchsize, 2 spatial dimensions, channels)."
                             % (self.input_shape,))
        # Default stride equals the pooling window (non-overlapping pooling).
        self.stride = self.pool_size if stride is None else as_tuple(stride, 2)
        self.pad = pad
        self.mode = mode
    def get_output_shape_for(self, input_shape):
        shape = list(input_shape)
        # Axes 1 and 2 are the spatial dimensions (channels-last layout).
        for axis, (size, step) in enumerate(zip(self.pool_size, self.stride), start=1):
            shape[axis] = pool_output_length(input_shape[axis],
                                             pool_size=size,
                                             stride=step,
                                             pad=self.pad)
        return tuple(shape)
    def get_output_for(self, input, **kwargs):
        # Only max pooling is implemented.
        assert self.mode == "max"
        return tf.nn.max_pool(
            input,
            ksize=(1,) + self.pool_size + (1,),
            strides=(1,) + self.stride + (1,),
            padding=self.pad,
        )
def spatial_expected_softmax(x, temp=1):
    """Soft-argmax over the two spatial axes of a (batch, H, W, C) tensor.

    For each channel, computes a softmax over the spatial positions and
    returns the expected coordinate in [-1, 1] along each spatial axis,
    concatenated to shape (batch, 2 * C).
    """
    assert len(x.get_shape()) == 4
    expectations = []
    for dim in (0, 1):
        size = x.get_shape()[dim + 1].value
        # Coordinate grid in [-1, 1], broadcast against (batch, H, W, C).
        coords = tf.linspace(-1.0, 1.0, size)
        coords = tf.expand_dims(coords, 1 - dim)
        coords = tf.expand_dims(coords, 0)
        coords = tf.expand_dims(coords, 3)
        # Subtract the per-map max before exponentiating for stability; the
        # small epsilon guards against an all-zero denominator.
        peak = tf.reduce_max(x, [1, 2], keep_dims=True)
        weights = tf.exp((x - peak) / temp) + 1e-5
        expected = tf.reduce_sum(weights * coords, [1, 2]) / (tf.reduce_sum(weights, [1, 2]))
        expectations.append(tf.expand_dims(expected, 2))
    return tf.reshape(tf.concat(axis=2, values=expectations), [-1, x.get_shape()[-1].value * 2])
class SpatialExpectedSoftmaxLayer(Layer):
    """
    Computes the softmax across a spatial region, separately for each channel, followed by an expectation operation.
    """
    def __init__(self, incoming, **kwargs):
        super(SpatialExpectedSoftmaxLayer, self).__init__(incoming, **kwargs)
    def get_output_shape_for(self, input_shape):
        # Each channel yields one (x, y) expectation, hence 2 * channels.
        return (input_shape[0], input_shape[-1] * 2)
    def get_output_for(self, input, **kwargs):
        # Delegates to the module-level helper (fixed temperature of 1).
        return spatial_expected_softmax(input)
class DropoutLayer(Layer):
    """
    Randomly zeroes units of the input with probability ``p`` at training time.

    Parameters
    ----------
    incoming : Layer instance or shape tuple
        The layer feeding into this layer.
    p : float
        Probability of dropping each unit.
    rescale : bool
        If True (default), retained units are scaled by ``1 / (1 - p)`` so the
        expected activation is unchanged; if False, no rescaling is applied.
    """
    def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
        super(DropoutLayer, self).__init__(incoming, **kwargs)
        self.p = p
        self.rescale = rescale
    def get_output_for(self, input, deterministic=False, **kwargs):
        """
        Parameters
        ----------
        input : tensor
            output from the previous layer
        deterministic : bool
            If true dropout and scaling is disabled, see notes
        """
        if deterministic or self.p == 0:
            return input
        retain_prob = 1. - self.p
        # BUG FIX: tf.nn.dropout already scales retained units by
        # 1 / keep_prob, so the previous implementation's extra
        # `input /= retain_prob` rescaled twice when rescale=True.
        dropped = tf.nn.dropout(input, keep_prob=retain_prob)
        if self.rescale:
            return dropped
        # Cancel tf.nn.dropout's implicit rescaling when rescale is disabled.
        return dropped * retain_prob
    def get_output_shape_for(self, input_shape):
        return input_shape
# TODO: add Conv3DLayer
class FlattenLayer(Layer):
    """
    Flattens the input: the first ``outdim - 1`` dimensions are kept as-is
    and every remaining dimension is collapsed into the last one.
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    outdim : int
        The number of dimensions in the output.
    See Also
    --------
    flatten : Shortcut
    """
    def __init__(self, incoming, outdim=2, **kwargs):
        super(FlattenLayer, self).__init__(incoming, **kwargs)
        self.outdim = outdim
        if outdim < 1:
            raise ValueError('Dim must be >0, was %i', outdim)
    def get_output_shape_for(self, input_shape):
        tail = input_shape[self.outdim - 1:]
        # Any unknown trailing dimension makes the collapsed size unknown.
        if any(s is None for s in tail):
            collapsed = None
        else:
            collapsed = int(np.prod(tail))
        return input_shape[:self.outdim - 1] + (collapsed,)
    def get_output_for(self, input, **kwargs):
        dynamic_shape = tf.shape(input)
        kept = dynamic_shape[:self.outdim - 1]
        merged = tf.reduce_prod(dynamic_shape[self.outdim - 1:])
        return tf.reshape(input, tf.concat(axis=0, values=[kept, tf.stack([merged])]))
flatten = FlattenLayer  # shortcut
class ReshapeLayer(Layer):
    """Reshapes the input to a given shape specification.

    Each entry of ``shape`` is either a positive int (fixed size), -1 (at
    most one; inferred from the total size), or a single-element list
    ``[i]`` meaning "copy input dimension i".
    """
    def __init__(self, incoming, shape, **kwargs):
        super(ReshapeLayer, self).__init__(incoming, **kwargs)
        shape = tuple(shape)
        # Validate every entry of the specification up front.
        for s in shape:
            if isinstance(s, int):
                if s == 0 or s < - 1:
                    raise ValueError("`shape` integers must be positive or -1")
            elif isinstance(s, list):
                if len(s) != 1 or not isinstance(s[0], int) or s[0] < 0:
                    raise ValueError("`shape` input references must be "
                                     "single-element lists of int >= 0")
            elif isinstance(s, (tf.Tensor, tf.Variable)): # T.TensorVariable):
                # Symbolic sizes are not supported in this TF port.
                raise NotImplementedError
                # if s.ndim != 0:
                # raise ValueError(
                # "A symbolic variable in a shape specification must be "
                # "a scalar, but had %i dimensions" % s.ndim)
            else:
                raise ValueError("`shape` must be a tuple of int and/or [int]")
        if sum(s == -1 for s in shape) > 1:
            raise ValueError("`shape` cannot contain multiple -1")
        self.shape = shape
        # try computing the output shape once as a sanity check
        self.get_output_shape_for(self.input_shape)
    def get_output_shape_for(self, input_shape, **kwargs):
        """Resolve ``[i]`` references and infer -1 from the total size."""
        # Initialize output shape from shape specification
        output_shape = list(self.shape)
        # First, replace all `[i]` with the corresponding input dimension, and
        # mask parts of the shapes thus becoming irrelevant for -1 inference
        masked_input_shape = list(input_shape)
        masked_output_shape = list(output_shape)
        for dim, o in enumerate(output_shape):
            if isinstance(o, list):
                if o[0] >= len(input_shape):
                    raise ValueError("specification contains [%d], but input "
                                     "shape has %d dimensions only" %
                                     (o[0], len(input_shape)))
                output_shape[dim] = input_shape[o[0]]
                masked_output_shape[dim] = input_shape[o[0]]
                if (input_shape[o[0]] is None) \
                        and (masked_input_shape[o[0]] is None):
                    # first time we copied this unknown input size: mask
                    # it, we have a 1:1 correspondence between out[dim] and
                    # in[o[0]] and can ignore it for -1 inference even if
                    # it is unknown.
                    masked_input_shape[o[0]] = 1
                    masked_output_shape[dim] = 1
        # Secondly, replace all symbolic shapes with `None`, as we cannot
        # infer their size here.
        for dim, o in enumerate(output_shape):
            if isinstance(o, (tf.Tensor, tf.Variable)): # T.TensorVariable):
                raise NotImplementedError
                # output_shape[dim] = None
                # masked_output_shape[dim] = None
        # From the shapes, compute the sizes of the input and output tensor
        input_size = (None if any(x is None for x in masked_input_shape)
                      else np.prod(masked_input_shape))
        output_size = (None if any(x is None for x in masked_output_shape)
                       else np.prod(masked_output_shape))
        del masked_input_shape, masked_output_shape
        # Finally, infer value for -1 if needed
        if -1 in output_shape:
            dim = output_shape.index(-1)
            if (input_size is None) or (output_size is None):
                output_shape[dim] = None
                output_size = None
            else:
                output_size *= -1
                output_shape[dim] = input_size // output_size
                output_size *= output_shape[dim]
        # Sanity check
        if (input_size is not None) and (output_size is not None) \
                and (input_size != output_size):
            raise ValueError("%s cannot be reshaped to specification %s. "
                             "The total size mismatches." %
                             (input_shape, self.shape))
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        """Perform the reshape, resolving ``[i]`` refs at graph time."""
        # Replace all `[i]` with the corresponding input dimension
        output_shape = list(self.shape)
        for dim, o in enumerate(output_shape):
            if isinstance(o, list):
                output_shape[dim] = tf.shape(input)[o[0]]
        # Everything else (including -1 inference) is handled by tf.reshape
        return tf.reshape(input, tf.stack(output_shape))
reshape = ReshapeLayer # shortcut
class SliceLayer(Layer):
    """
    Selects a slice (or a single index) of the input along a given axis.

    Parameters
    ----------
    incoming : Layer instance or shape tuple
        The layer feeding into this layer.
    indices : int or slice
        An int drops the axis; a slice keeps it (possibly shortened).
    axis : int
        Axis to slice along; may be negative.
    """
    def __init__(self, incoming, indices, axis=-1, **kwargs):
        super(SliceLayer, self).__init__(incoming, **kwargs)
        self.slice = indices
        self.axis = axis
    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)
        if isinstance(self.slice, int):
            # A scalar index removes the axis entirely.
            del output_shape[self.axis]
        elif input_shape[self.axis] is not None:
            output_shape[self.axis] = len(
                list(range(*self.slice.indices(input_shape[self.axis]))))
        else:
            output_shape[self.axis] = None
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        axis = self.axis
        ndims = input.get_shape().ndims
        if axis < 0:
            axis += ndims
        if isinstance(self.slice, int) and self.slice < 0:
            # Negative scalar index: reverse the target axis and pick the
            # mirrored position. BUG FIX: reverse along the normalized target
            # axis; the previous code passed [self.axis + 1] to tf.reverse,
            # which reversed an unrelated dimension (e.g. the batch dimension
            # for the default axis=-1).
            return tf.reverse(input, [axis])[
                (slice(None),) * axis + (-1 - self.slice,) + (slice(None),) * (ndims - axis - 1)
            ]
        return input[(slice(None),) * axis + (self.slice,) + (slice(None),) * (ndims - axis - 1)]
class DimshuffleLayer(Layer):
    """
    Rearranges the input dimensions according to ``pattern``, in the style of
    Theano's ``dimshuffle``: each entry is either an input dimension index or
    ``'x'`` for a new broadcastable (size-1) axis. Input dimensions omitted
    from the pattern must have size 1 and are dropped.
    """
    def __init__(self, incoming, pattern, **kwargs):
        super(DimshuffleLayer, self).__init__(incoming, **kwargs)
        # Sanity check the pattern
        used_dims = set()
        for p in pattern:
            if isinstance(p, int):
                # Dimension p
                if p in used_dims:
                    raise ValueError("pattern contains dimension {0} more "
                                     "than once".format(p))
                used_dims.add(p)
            elif p == 'x':
                # Broadcast
                pass
            else:
                raise ValueError("pattern should only contain dimension"
                                 "indices or 'x', not {0}".format(p))
        self.pattern = pattern
        # try computing the output shape once as a sanity check
        self.get_output_shape_for(self.input_shape)
    def get_output_shape_for(self, input_shape):
        # Build output shape while keeping track of the dimensions that we are
        # attempting to collapse, so we can ensure that they are broadcastable
        output_shape = []
        dims_used = [False] * len(input_shape)
        for p in self.pattern:
            if isinstance(p, int):
                if p < 0 or p >= len(input_shape):
                    raise ValueError("pattern contains {0}, but input shape "
                                     "has {1} dimensions "
                                     "only".format(p, len(input_shape)))
                # Dimension p
                o = input_shape[p]
                dims_used[p] = True
            elif p == 'x':
                # Broadcast; will be of size 1
                o = 1
            output_shape.append(o)
        for i, (dim_size, used) in enumerate(zip(input_shape, dims_used)):
            if not used and dim_size != 1 and dim_size is not None:
                raise ValueError(
                    "pattern attempted to collapse dimension "
                    "{0} of size {1}; dimensions with size != 1/None are not"
                    "broadcastable and cannot be "
                    "collapsed".format(i, dim_size))
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        # BUG FIX: a bare tf.transpose(input, self.pattern) crashed whenever
        # the pattern contained 'x' or omitted a (size-1) input dimension,
        # both of which the shape check above explicitly allows.
        int_dims = [p for p in self.pattern if isinstance(p, int)]
        ndim = len(self.input_shape)
        # Drop input dimensions the pattern omits; get_output_shape_for
        # guarantees they have static size 1 (or None).
        dropped = [d for d in range(ndim) if d not in int_dims]
        output = input
        if dropped:
            output = tf.squeeze(output, axis=dropped)
        # Map original dimension indices to their positions after squeezing,
        # then permute the remaining dimensions in pattern order.
        remap = {d: i for i, d in enumerate(sorted(int_dims))}
        perm = [remap[p] for p in int_dims]
        if perm != list(range(len(perm))):
            output = tf.transpose(output, perm)
        # Insert broadcastable size-1 axes where the pattern says 'x'.
        for pos, p in enumerate(self.pattern):
            if p == 'x':
                output = tf.expand_dims(output, pos)
        return output
dimshuffle = DimshuffleLayer # shortcut
def apply_ln(layer):
    """Return a layer-normalization helper bound to `layer`.

    The returned function normalizes its input across axis 1 and applies a
    learned per-feature scale and bias. Parameters are created on first use
    (keyed by `prefix`) and cached in `layer.norm_params`.
    """
    def _normalize(x, prefix):
        EPS = 1e-5
        dim = x.get_shape()[-1].value
        bias_name = prefix + "_ln/bias"
        scale_name = prefix + "_ln/scale"
        params = layer.norm_params
        # Lazily create the affine parameters the first time each prefix
        # is normalized.
        if bias_name not in params:
            params[bias_name] = layer.add_param(
                tf.zeros_initializer(), (dim,), name=bias_name, regularizable=False)
        if scale_name not in params:
            params[scale_name] = layer.add_param(
                tf.ones_initializer(), (dim,), name=scale_name)
        mean, var = tf.nn.moments(x, axes=[1], keep_dims=True)
        normed = (x - mean) / tf.sqrt(var + EPS)
        return normed * params[scale_name] + params[bias_name]
    return _normalize
class GRULayer(Layer):
    """
    A gated recurrent unit implements the following update mechanism:
    Reset gate:        r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
    Update gate:       u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
    Cell gate:         c(t) = f_c(x(t) @ W_xc + r(t) * (h(t-1) @ W_hc) + b_c)
    New hidden state:  h(t) = (1 - u(t)) * h(t-1) + u_t * c(t)
    Note that the reset, update, and cell vectors must have the same dimension as the hidden state
    """
    def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=tf.nn.sigmoid, W_x_init=XavierUniformInitializer(), W_h_init=OrthogonalInitializer(),
                 b_init=tf.zeros_initializer(), hidden_init=tf.zeros_initializer(), hidden_init_trainable=False,
                 layer_normalization=False, **kwargs):
        # `None` nonlinearities mean "linear" (identity).
        if hidden_nonlinearity is None:
            hidden_nonlinearity = tf.identity
        if gate_nonlinearity is None:
            gate_nonlinearity = tf.identity
        super(GRULayer, self).__init__(incoming, **kwargs)
        # Input is (batch, time, features...); features are flattened.
        input_shape = self.input_shape[2:]
        input_dim = np.prod(input_shape)
        self.layer_normalization = layer_normalization
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_x_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_h_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_x_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_h_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_x_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_h_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        # Concatenated views of the gate weights so step() can compute all
        # gate pre-activations with a single matmul per input.
        self.W_x_ruc = tf.concat(axis=1, values=[self.W_xr, self.W_xu, self.W_xc])
        self.W_h_ruc = tf.concat(axis=1, values=[self.W_hr, self.W_hu, self.W_hc])
        self.W_x_ru = tf.concat(axis=1, values=[self.W_xr, self.W_xu])
        self.W_h_ru = tf.concat(axis=1, values=[self.W_hr, self.W_hu])
        self.b_ruc = tf.concat(axis=0, values=[self.b_r, self.b_u, self.b_c])
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
        self.norm_params = dict()
        # pre-run the step method to initialize the normalization parameters
        h_dummy = tf.placeholder(dtype=tf.float32, shape=(None, num_units), name="h_dummy")
        x_dummy = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="x_dummy")
        self.step(h_dummy, x_dummy)
    def step(self, hprev, x):
        """Advance the GRU one time step: (h(t-1), x(t)) -> h(t)."""
        if self.layer_normalization:
            # Layer-normalized path: biases come from the LN affine params,
            # so the explicit b_* terms are not added here.
            ln = apply_ln(self)
            x_ru = ln(tf.matmul(x, self.W_x_ru), "x_ru")
            h_ru = ln(tf.matmul(hprev, self.W_h_ru), "h_ru")
            x_r, x_u = tf.split(axis=1, num_or_size_splits=2, value=x_ru)
            h_r, h_u = tf.split(axis=1, num_or_size_splits=2, value=h_ru)
            x_c = ln(tf.matmul(x, self.W_xc), "x_c")
            h_c = ln(tf.matmul(hprev, self.W_hc), "h_c")
            r = self.gate_nonlinearity(x_r + h_r)
            u = self.gate_nonlinearity(x_u + h_u)
            c = self.nonlinearity(x_c + r * h_c)
            h = (1 - u) * hprev + u * c
            return h
        else:
            # Fused path: one matmul for all three gates, then split.
            xb_ruc = tf.matmul(x, self.W_x_ruc) + tf.reshape(self.b_ruc, (1, -1))
            h_ruc = tf.matmul(hprev, self.W_h_ruc)
            xb_r, xb_u, xb_c = tf.split(axis=1, num_or_size_splits=3, value=xb_ruc)
            h_r, h_u, h_c = tf.split(axis=1, num_or_size_splits=3, value=h_ruc)
            r = self.gate_nonlinearity(xb_r + h_r)
            u = self.gate_nonlinearity(xb_u + h_u)
            c = self.nonlinearity(xb_c + r * h_c)
            h = (1 - u) * hprev + u * c
            return h
    def get_step_layer(self, l_in, l_prev_hidden, name=None):
        """Wrap this layer as a single-step layer for incremental decoding."""
        return GRUStepLayer(incomings=[l_in, l_prev_hidden], recurrent_layer=self, name=name)
    def get_output_shape_for(self, input_shape):
        n_batch, n_steps = input_shape[:2]
        return n_batch, n_steps, self.num_units
    def get_output_for(self, input, **kwargs):
        input_shape = tf.shape(input)
        n_batches = input_shape[0]
        n_steps = input_shape[1]
        input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
        # Callers may inject an initial hidden state via kwargs; otherwise
        # the learned h0 is tiled over the batch.
        if 'recurrent_state' in kwargs and self in kwargs['recurrent_state']:
            h0s = kwargs['recurrent_state'][self]
        else:
            h0s = tf.tile(
                tf.reshape(self.h0, (1, self.num_units)),
                (n_batches, 1)
            )
        # flatten extra dimensions
        # tf.scan iterates over the leading axis, so move time to the front.
        shuffled_input = tf.transpose(input, (1, 0, 2))
        hs = tf.scan(
            self.step,
            elems=shuffled_input,
            initializer=h0s
        )
        # Move time back to axis 1: (batch, time, units).
        shuffled_hs = tf.transpose(hs, (1, 0, 2))
        if 'recurrent_state_output' in kwargs:
            kwargs['recurrent_state_output'][self] = shuffled_hs
        return shuffled_hs
class GRUStepLayer(MergeLayer):
    """Single-step view of a recurrent GRU layer: (x(t), h(t-1)) -> h(t)."""
    def __init__(self, incomings, recurrent_layer, **kwargs):
        super(GRUStepLayer, self).__init__(incomings, **kwargs)
        self._gru_layer = recurrent_layer
    def get_params(self, **tags):
        # All parameters are owned by the wrapped recurrent layer.
        return self._gru_layer.get_params(**tags)
    def get_output_shape_for(self, input_shapes):
        batch = input_shapes[0][0]
        return batch, self._gru_layer.num_units
    def get_output_for(self, inputs, **kwargs):
        x, hprev = inputs
        batch = tf.shape(x)[0]
        # Flatten the per-step input into a feature vector, restoring the
        # static feature dimension for downstream shape inference.
        flat_x = tf.reshape(x, tf.stack([batch, -1]))
        flat_x.set_shape((None, self.input_shapes[0][1]))
        return self._gru_layer.step(hprev, flat_x)
class TfGRULayer(Layer):
    """
    Use TensorFlow's built-in GRU implementation
    """
    def __init__(self, incoming, num_units, hidden_nonlinearity, horizon=None, hidden_init_trainable=False,
                 **kwargs):
        assert len(incoming.output_shape) == 3
        # BUG FIX: layers expose `output_shape`, not `shape`; the previous
        # `incoming.shape[2]` raised AttributeError (the assert on the line
        # above and every sibling layer use `output_shape`).
        input_dim = incoming.output_shape[2]
        gru = tf.nn.rnn_cell.GRUCell(num_units=num_units, activation=hidden_nonlinearity)
        self.num_units = num_units
        self.horizon = horizon
        self.gru = gru
        self.hidden_nonlinearity = hidden_nonlinearity
        Layer.__init__(self, incoming=incoming, **kwargs)
        # dummy input variable: run the cell once so its variables exist,
        # then register them with this layer's parameter machinery.
        input_dummy = tf.placeholder(tf.float32, (None, input_dim), "input_dummy")
        hidden_dummy = tf.placeholder(tf.float32, (None, num_units), "hidden_dummy")
        with tf.variable_scope(self.name) as vs:
            gru(input_dummy, hidden_dummy, scope=vs)
            vs.reuse_variables()
            self.scope = vs
            all_vars = [v for v in tf.global_variables() if v.name.startswith(vs.name)]
            trainable_vars = [v for v in tf.trainable_variables() if v.name.startswith(vs.name)]
        for var in trainable_vars:
            self.add_param(spec=var, shape=None, name=None, trainable=True)
        for var in set(all_vars) - set(trainable_vars):
            self.add_param(spec=var, shape=None, name=None, trainable=False)
        # Learned (optionally trainable) initial hidden state.
        self.h0 = self.add_param(tf.zeros_initializer(), (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
    def step(self, hprev, x):
        """Advance the GRU one time step; GRUCell returns (output, state)."""
        return self.gru(x, hprev, scope=self.scope)[1]
    def get_output_for(self, input, **kwargs):
        input_shape = tf.shape(input)
        n_batches = input_shape[0]
        state = tf.tile(
            tf.reshape(self.h0, (1, self.num_units)),
            (n_batches, 1)
        )
        state.set_shape((None, self.num_units))
        if self.horizon is not None:
            # Known fixed horizon: unroll the loop in Python.
            outputs = []
            for idx in range(self.horizon):
                output, state = self.gru(input[:, idx, :], state, scope=self.scope) # self.name)
                outputs.append(tf.expand_dims(output, 1))
            outputs = tf.concat(axis=1, values=outputs)
            return outputs
        else:
            # Dynamic horizon: iterate with tf.scan over the time axis.
            n_steps = input_shape[1]
            input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
            # flatten extra dimensions
            shuffled_input = tf.transpose(input, (1, 0, 2))
            shuffled_input.set_shape((None, None, self.input_shape[-1]))
            hs = tf.scan(
                self.step,
                elems=shuffled_input,
                initializer=state
            )
            shuffled_hs = tf.transpose(hs, (1, 0, 2))
            return shuffled_hs
    def get_output_shape_for(self, input_shape):
        n_batch, n_steps = input_shape[:2]
        return n_batch, n_steps, self.num_units
    def get_step_layer(self, l_in, l_prev_hidden, name=None):
        """Wrap this layer as a single-step layer for incremental decoding."""
        return GRUStepLayer(incomings=[l_in, l_prev_hidden], recurrent_layer=self, name=name)
class PseudoLSTMLayer(Layer):
    """
    A Pseudo LSTM unit implements the following update mechanism:
    Incoming gate:     i(t) = σ(W_hi @ h(t-1)) + W_xi @ x(t) + b_i)
    Forget gate:       f(t) = σ(W_hf @ h(t-1)) + W_xf @ x(t) + b_f)
    Out gate:          o(t) = σ(W_ho @ h(t-1)) + W_xo @ x(t) + b_o)
    New cell gate:     c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
    Cell gate:         c(t) = f(t) * c(t-1) + i(t) * c_new(t)
    Hidden state:      h(t) = ϕ(c(t))
    Output:            out  = h(t)
    If gate_squash_inputs is set to True, we have the following updates instead:
    Out gate:          o(t) = σ(W_ho @ h(t-1)) + W_xo @ x(t) + b_o)
    Incoming gate:     i(t) = σ(W_hi @ (o(t) * h(t-1)) + W_xi @ x(t) + b_i)
    Forget gate:       f(t) = σ(W_hf @ (o(t) * h(t-1)) + W_xf @ x(t) + b_f)
    New cell gate:     c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
    Cell state:        c(t) = f(t) * c(t-1) + i(t) * c_new(t)
    Hidden state:      h(t) = ϕ(c(t))
    Output:            out  = h(t)
    Note that the incoming, forget, cell, and out vectors must have the same dimension as the hidden state
    The notation is slightly different from
    http://r2rt.com/written-memories-understanding-deriving-and-extending-the-lstm.html: here we introduce the cell
    gate and swap its role with the hidden state, so that the output is the same as the hidden state (and we can use
    this as a drop-in replacement for LSTMLayer).
    """
    def __init__(self, incoming, num_units, hidden_nonlinearity=tf.tanh,
                 gate_nonlinearity=tf.nn.sigmoid, W_x_init=XavierUniformInitializer(), W_h_init=OrthogonalInitializer(),
                 forget_bias=1.0, b_init=tf.zeros_initializer(), hidden_init=tf.zeros_initializer(),
                 hidden_init_trainable=False, cell_init=tf.zeros_initializer(), cell_init_trainable=False,
                 gate_squash_inputs=False, layer_normalization=False, **kwargs):
        # `None` nonlinearities mean "linear" (identity).
        if hidden_nonlinearity is None:
            hidden_nonlinearity = tf.identity
        if gate_nonlinearity is None:
            gate_nonlinearity = tf.identity
        super(PseudoLSTMLayer, self).__init__(incoming, **kwargs)
        self.layer_normalization = layer_normalization
        # Input is (batch, time, features...); features are flattened.
        input_shape = self.input_shape[2:]
        input_dim = np.prod(input_shape)
        # Weights for the initial hidden state (this is actually not used, since the initial hidden state is
        # determined by the initial cell state via h0 = self.nonlinearity(c0)). It is here merely for
        # interface convenience
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the initial cell state
        self.c0 = self.add_param(cell_init, (num_units,), name="c0", trainable=cell_init_trainable,
                                 regularizable=False)
        # Weights for the incoming gate
        self.W_xi = self.add_param(W_x_init, (input_dim, num_units), name="W_xi")
        self.W_hi = self.add_param(W_h_init, (num_units, num_units), name="W_hi")
        self.b_i = self.add_param(b_init, (num_units,), name="b_i", regularizable=False)
        # Weights for the forget gate
        self.W_xf = self.add_param(W_x_init, (input_dim, num_units), name="W_xf")
        self.W_hf = self.add_param(W_h_init, (num_units, num_units), name="W_hf")
        self.b_f = self.add_param(b_init, (num_units,), name="b_f", regularizable=False)
        # Weights for the out gate
        self.W_xo = self.add_param(W_x_init, (input_dim, num_units), name="W_xo")
        self.W_ho = self.add_param(W_h_init, (num_units, num_units), name="W_ho")
        self.b_o = self.add_param(b_init, (num_units,), name="b_o", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_x_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_h_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
        self.forget_bias = forget_bias
        self.gate_squash_inputs = gate_squash_inputs
        # Concatenated weight views so step() can batch the gate matmuls.
        self.W_x_ifo = tf.concat(axis=1, values=[self.W_xi, self.W_xf, self.W_xo])
        self.W_h_ifo = tf.concat(axis=1, values=[self.W_hi, self.W_hf, self.W_ho])
        self.W_x_if = tf.concat(axis=1, values=[self.W_xi, self.W_xf])
        self.W_h_if = tf.concat(axis=1, values=[self.W_hi, self.W_hf])
        self.norm_params = dict()
    def step(self, hcprev, x):
        """Advance one time step on the concatenated [h, c] state vector."""
        # hcprev packs hidden and cell states side by side: [h | c].
        hprev = hcprev[:, :self.num_units]
        cprev = hcprev[:, self.num_units:]
        if self.layer_normalization:
            ln = apply_ln(self)
        else:
            # No-op stand-in with the same call signature as the LN helper.
            ln = lambda x, *args: x
        if self.gate_squash_inputs:
            """
            Out gate:          o(t) = σ(W_ho @ h(t-1)) + W_xo @ x(t) + b_o)
            Incoming gate:     i(t) = σ(W_hi @ (o(t) * h(t-1)) + W_xi @ x(t) + b_i)
            Forget gate:       f(t) = σ(W_hf @ (o(t) * h(t-1)) + W_xf @ x(t) + b_f)
            New cell gate:     c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
            Cell state:        c(t) = f(t) * c(t-1) + i(t) * c_new(t)
            Hidden state:      h(t) = ϕ(c(t))
            Output:            out  = h(t)
            """
            o = self.nonlinearity(
                ln(tf.matmul(hprev, self.W_ho), "h_o") +
                ln(tf.matmul(x, self.W_xo), "x_o") + self.b_o
            )
            x_if = ln(tf.matmul(x, self.W_x_if), "x_if")
            h_if = ln(tf.matmul(o * hprev, self.W_h_if), "h_if")
            x_i, x_f = tf.split(axis=1, num_or_size_splits=2, value=x_if)
            h_i, h_f = tf.split(axis=1, num_or_size_splits=2, value=h_if)
            i = self.gate_nonlinearity(x_i + h_i + self.b_i)
            # forget_bias shifts the forget gate toward "remember" at init.
            f = self.gate_nonlinearity(x_f + h_f + self.b_f + self.forget_bias)
            c_new = self.nonlinearity(
                ln(tf.matmul(o * hprev, self.W_hc), "h_c") +
                ln(tf.matmul(x, self.W_xc), "x_c") +
                self.b_c
            )
            c = f * cprev + i * c_new
            h = self.nonlinearity(ln(c, "c"))
            return tf.concat(axis=1, values=[h, c])
        else:
            """
            Incoming gate:     i(t) = σ(W_hi @ h(t-1)) + W_xi @ x(t) + b_i)
            Forget gate:       f(t) = σ(W_hf @ h(t-1)) + W_xf @ x(t) + b_f)
            Out gate:          o(t) = σ(W_ho @ h(t-1)) + W_xo @ x(t) + b_o)
            New cell gate: c_new(t) = ϕ(W_hc @ (o(t) * h(t-1)) + W_xc @ x(t) + b_c)
            Cell gate:         c(t) = f(t) * c(t-1) + i(t) * c_new(t)
            Hidden state:      h(t) = ϕ(c(t))
            Output:            out  = h(t)
            """
            x_ifo = ln(tf.matmul(x, self.W_x_ifo), "x_ifo")
            h_ifo = ln(tf.matmul(hprev, self.W_h_ifo), "h_ifo")
            x_i, x_f, x_o = tf.split(axis=1, num_or_size_splits=3, value=x_ifo)
            h_i, h_f, h_o = tf.split(axis=1, num_or_size_splits=3, value=h_ifo)
            i = self.gate_nonlinearity(x_i + h_i + self.b_i)
            # forget_bias shifts the forget gate toward "remember" at init.
            f = self.gate_nonlinearity(x_f + h_f + self.b_f + self.forget_bias)
            o = self.gate_nonlinearity(x_o + h_o + self.b_o)
            c_new = self.nonlinearity(
                ln(tf.matmul(o * hprev, self.W_hc), "h_c") +
                ln(tf.matmul(x, self.W_xc), "x_c") +
                self.b_c
            )
            c = f * cprev + i * c_new
            h = self.nonlinearity(ln(c, "c"))
            return tf.concat(axis=1, values=[h, c])
    def get_step_layer(self, l_in, l_prev_state, name=None):
        """Wrap this layer as a single-step layer for incremental decoding."""
        return LSTMStepLayer(incomings=[l_in, l_prev_state], recurrent_layer=self, name=name)
    def get_output_shape_for(self, input_shape):
        n_batch, n_steps = input_shape[:2]
        return n_batch, n_steps, self.num_units
    def get_output_for(self, input, **kwargs):
        input_shape = tf.shape(input)
        n_batches = input_shape[0]
        n_steps = input_shape[1]
        input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
        c0s = tf.tile(
            tf.reshape(self.c0, (1, self.num_units)),
            (n_batches, 1)
        )
        # The initial hidden state is derived from the cell state (h0 param
        # is unused; see the note in __init__).
        h0s = self.nonlinearity(c0s)
        # flatten extra dimensions
        # tf.scan iterates over the leading axis, so move time to the front.
        shuffled_input = tf.transpose(input, (1, 0, 2))
        hcs = tf.scan(
            self.step,
            elems=shuffled_input,
            initializer=tf.concat(axis=1, values=[h0s, c0s])
        )
        shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
        # Only the hidden-state half of [h | c] is returned.
        shuffled_hs = shuffled_hcs[:, :, :self.num_units]
        shuffled_cs = shuffled_hcs[:, :, self.num_units:]
        return shuffled_hs
class LSTMLayer(Layer):
"""
A LSTM unit implements the following update mechanism:
Incoming gate: i(t) = f_i(x(t) @ W_xi + h(t-1) @ W_hi + w_ci * c(t-1) + b_i)
Forget gate: f(t) = f_f(x(t) @ W_xf + h(t-1) @ W_hf + w_cf * c(t-1) + b_f)
Cell gate: c(t) = f(t) * c(t - 1) + i(t) * f_c(x(t) @ W_xc + h(t-1) @ W_hc + b_c)
Out gate: o(t) = f_o(x(t) @ W_xo + h(t-1) W_ho + w_co * c(t) + b_o)
New hidden state: h(t) = o(t) * f_h(c(t))
Note that the incoming, forget, cell, and out vectors must have the same dimension as the hidden state
"""
def __init__(self, incoming, num_units, hidden_nonlinearity=tf.tanh,
gate_nonlinearity=tf.nn.sigmoid, W_x_init=XavierUniformInitializer(), W_h_init=OrthogonalInitializer(),
forget_bias=1.0, use_peepholes=False, w_init=tf.random_normal_initializer(stddev=0.1),
b_init=tf.zeros_initializer(), hidden_init=tf.zeros_initializer(), hidden_init_trainable=False,
cell_init=tf.zeros_initializer(), cell_init_trainable=False, layer_normalization=False,
**kwargs):
if hidden_nonlinearity is None:
hidden_nonlinearity = tf.identity
if gate_nonlinearity is None:
gate_nonlinearity = tf.identity
super(LSTMLayer, self).__init__(incoming, **kwargs)
self.layer_normalization = layer_normalization
input_shape = self.input_shape[2:]
input_dim = np.prod(input_shape)
# Weights for the initial hidden state
self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
regularizable=False)
# Weights for the initial cell state
self.c0 = self.add_param(cell_init, (num_units,), name="c0", trainable=cell_init_trainable,
regularizable=False)
# Weights for the incoming gate
self.W_xi = self.add_param(W_x_init, (input_dim, num_units), name="W_xi")
self.W_hi = self.add_param(W_h_init, (num_units, num_units), name="W_hi")
if use_peepholes:
self.w_ci = self.add_param(w_init, (num_units,), name="w_ci")
self.b_i = self.add_param(b_init, (num_units,), name="b_i", regularizable=False)
# Weights for the forget gate
self.W_xf = self.add_param(W_x_init, (input_dim, num_units), name="W_xf")
self.W_hf = self.add_param(W_h_init, (num_units, num_units), name="W_hf")
if use_peepholes:
self.w_cf = self.add_param(w_init, (num_units,), name="w_cf")
self.b_f = self.add_param(b_init, (num_units,), name="b_f", regularizable=False)
# Weights for the cell gate
self.W_xc = self.add_param(W_x_init, (input_dim, num_units), name="W_xc")
self.W_hc = self.add_param(W_h_init, (num_units, num_units), name="W_hc")
self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
# Weights for the reset gate
self.W_xr = self.add_param(W_x_init, (input_dim, num_units), name="W_xr")
self.W_hr = self.add_param(W_h_init, (num_units, num_units), name="W_hr")
self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
# Weights for the out gate
self.W_xo = self.add_param(W_x_init, (input_dim, num_units), name="W_xo")
self.W_ho = self.add_param(W_h_init, (num_units, num_units), name="W_ho")
if use_peepholes:
self.w_co = self.add_param(w_init, (num_units,), name="w_co")
self.b_o = self.add_param(b_init, (num_units,), name="b_o", regularizable=False)
self.gate_nonlinearity = gate_nonlinearity
self.num_units = num_units
self.nonlinearity = hidden_nonlinearity
self.forget_bias = forget_bias
self.use_peepholes = use_peepholes
self.W_x_ifco = tf.concat(axis=1, values=[self.W_xi, self.W_xf, self.W_xc, self.W_xo])
self.W_h_ifco = tf.concat(axis=1, values=[self.W_hi, self.W_hf, self.W_hc, self.W_ho])
if use_peepholes:
self.w_c_ifo = tf.concat(axis=0, values=[self.w_ci, self.w_cf, self.w_co])
self.norm_params = dict()
    def step(self, hcprev, x):
        """
        One LSTM transition. `hcprev` packs the previous hidden and cell
        states side by side as [h(t-1); c(t-1)], each of width num_units.

        Incoming gate: i(t) = f_i(x(t) @ W_xi + h(t-1) @ W_hi + w_ci * c(t-1) + b_i)
        Forget gate: f(t) = f_f(x(t) @ W_xf + h(t-1) @ W_hf + w_cf * c(t-1) + b_f)
        Cell gate: c(t) = f(t) * c(t - 1) + i(t) * f_c(x(t) @ W_xc + h(t-1) @ W_hc + b_c)
        Out gate: o(t) = f_o(x(t) @ W_xo + h(t-1) W_ho + w_co * c(t) + b_o)
        New hidden state: h(t) = o(t) * f_h(c(t))

        NOTE(review): when use_peepholes is set, the code below feeds the
        *previous* cell state (cprev) into the out-gate peephole, not the new
        c(t) shown in the formula above — confirm which is intended before
        changing either.
        """
        hprev = hcprev[:, :self.num_units]
        cprev = hcprev[:, self.num_units:]
        if self.layer_normalization:
            ln = apply_ln(self)
        else:
            # No-op stand-in so the ln(...) call sites below work unchanged.
            ln = lambda x, *args: x
        # One fused matmul for all four gates, then split into i/f/c/o parts.
        x_ifco = ln(tf.matmul(x, self.W_x_ifco), "x_ifco")
        h_ifco = ln(tf.matmul(hprev, self.W_h_ifco), "h_ifco")
        x_i, x_f, x_c, x_o = tf.split(axis=1, num_or_size_splits=4, value=x_ifco)
        h_i, h_f, h_c, h_o = tf.split(axis=1, num_or_size_splits=4, value=h_ifco)
        if self.use_peepholes:
            i = self.gate_nonlinearity(x_i + h_i + self.w_ci * cprev + self.b_i)
            f = self.gate_nonlinearity(x_f + h_f + self.w_cf * cprev + self.b_f + self.forget_bias)
            o = self.gate_nonlinearity(x_o + h_o + self.w_co * cprev + self.b_o)
        else:
            i = self.gate_nonlinearity(x_i + h_i + self.b_i)
            f = self.gate_nonlinearity(x_f + h_f + self.b_f + self.forget_bias)
            o = self.gate_nonlinearity(x_o + h_o + self.b_o)
        c = f * cprev + i * self.nonlinearity(x_c + h_c + self.b_c)
        h = o * self.nonlinearity(ln(c, "c"))
        return tf.concat(axis=1, values=[h, c])
def get_step_layer(self, l_in, l_prev_state, name=None):
return LSTMStepLayer(incomings=[l_in, l_prev_state], recurrent_layer=self, name=name)
def get_output_shape_for(self, input_shape):
n_batch, n_steps = input_shape[:2]
return n_batch, n_steps, self.num_units
def get_output_for(self, input, **kwargs):
input_shape = tf.shape(input)
n_batches = input_shape[0]
n_steps = input_shape[1]
input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
h0s = tf.tile(
tf.reshape(self.h0, (1, self.num_units)),
(n_batches, 1)
)
c0s = tf.tile(
tf.reshape(self.c0, (1, self.num_units)),
(n_batches, 1)
)
# flatten extra dimensions
shuffled_input = tf.transpose(input, (1, 0, 2))
hcs = tf.scan(
self.step,
elems=shuffled_input,
initializer=tf.concat(axis=1, values=[h0s, c0s])
)
shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
shuffled_hs = shuffled_hcs[:, :, :self.num_units]
shuffled_cs = shuffled_hcs[:, :, self.num_units:]
if 'recurrent_state_output' in kwargs:
kwargs['recurrent_state_output'][self] = shuffled_hcs
return shuffled_hs
class LSTMStepLayer(MergeLayer):
    """Single-step view of a recurrent layer.

    Takes [input, packed previous state] and applies one transition of the
    wrapped recurrent layer; all parameters live on that layer.
    """

    def __init__(self, incomings, recurrent_layer, **kwargs):
        super(LSTMStepLayer, self).__init__(incomings, **kwargs)
        self._recurrent_layer = recurrent_layer

    def get_params(self, **tags):
        # Delegate: this layer owns no parameters of its own.
        return self._recurrent_layer.get_params(**tags)

    def get_output_shape_for(self, input_shapes):
        batch = input_shapes[0][0]
        units = self._recurrent_layer.num_units
        # Output packs hidden and cell state side by side.
        return batch, 2 * units

    def get_output_for(self, inputs, **kwargs):
        x, hcprev = inputs
        batch = tf.shape(x)[0]
        flat_x = tf.reshape(x, tf.stack([batch, -1]))
        return self._recurrent_layer.step(hcprev, flat_x)
class TfBasicLSTMLayer(Layer):
    """
    Use TensorFlow's built-in (basic) LSTM implementation.

    The cell's variables are created eagerly in __init__ (by invoking the
    cell once on dummy placeholders) so they can be registered as this
    layer's parameters.
    """

    def __init__(self, incoming, num_units, hidden_nonlinearity, horizon=None, hidden_init_trainable=False,
                 forget_bias=1.0, use_peepholes=False, **kwargs):
        assert not use_peepholes, "Basic LSTM does not support peepholes!"
        assert len(incoming.output_shape) == 3
        # Fix: read the feature dimension from output_shape, which every
        # layer provides (`.shape` exists only on input layers; for those the
        # two agree, so this stays backward-compatible).
        input_dim = incoming.output_shape[2]
        lstm = tf.contrib.rnn.BasicLSTMCell(
            num_units=num_units,
            activation=hidden_nonlinearity,
            state_is_tuple=True,
            forget_bias=forget_bias
        )
        self.num_units = num_units
        self.horizon = horizon
        self.lstm = lstm
        self.hidden_nonlinearity = hidden_nonlinearity
        Layer.__init__(self, incoming=incoming, **kwargs)
        # Dummy placeholders: a single cell invocation forces TF to create
        # the cell's variables inside our scope.
        input_dummy = tf.placeholder(tf.float32, (None, input_dim), "input_dummy")
        hidden_dummy = tf.placeholder(tf.float32, (None, num_units), "hidden_dummy")
        cell_dummy = tf.placeholder(tf.float32, (None, num_units), "cell_dummy")
        with tf.variable_scope(self.name) as vs:
            lstm(input_dummy, (cell_dummy, hidden_dummy), scope=vs)
            vs.reuse_variables()
            self.scope = vs
            all_vars = [v for v in tf.global_variables() if v.name.startswith(vs.name)]
            trainable_vars = [v for v in tf.trainable_variables() if v.name.startswith(vs.name)]
        for var in trainable_vars:
            self.add_param(spec=var, shape=None, name=None, trainable=True)
        # Track non-trainable cell variables as parameters as well.
        for var in set(all_vars) - set(trainable_vars):
            self.add_param(spec=var, shape=None, name=None, trainable=False)
        # Initial hidden/cell states, optionally learned.
        self.h0 = self.add_param(tf.zeros_initializer(), (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        self.c0 = self.add_param(tf.zeros_initializer(), (num_units,), name="c0", trainable=hidden_init_trainable,
                                 regularizable=False)

    def step(self, hcprev, x):
        """One LSTM transition on a packed [h; c] state row."""
        hprev = hcprev[:, :self.num_units]
        cprev = hcprev[:, self.num_units:]
        x.set_shape((None, self.input_shape[-1]))
        # BasicLSTMCell returns (output, (c, h)); keep only the new state.
        c, h = self.lstm(x, (cprev, hprev), scope=self.scope)[1]
        return tf.concat(axis=1, values=[h, c])

    def get_output_for(self, input, **kwargs):
        """Unroll over (batch, steps, features); return the hidden states."""
        input_shape = tf.shape(input)
        n_batches = input_shape[0]
        h0s = tf.tile(
            tf.reshape(self.h0, (1, self.num_units)),
            (n_batches, 1)
        )
        h0s.set_shape((None, self.num_units))
        c0s = tf.tile(
            tf.reshape(self.c0, (1, self.num_units)),
            (n_batches, 1)
        )
        c0s.set_shape((None, self.num_units))
        state = (c0s, h0s)
        if self.horizon is not None:
            # Static unroll for a fixed, known horizon.
            outputs = []
            for idx in range(self.horizon):
                output, state = self.lstm(input[:, idx, :], state, scope=self.scope)
                outputs.append(tf.expand_dims(output, 1))
            outputs = tf.concat(axis=1, values=outputs)
            return outputs
        else:
            # Dynamic unroll via tf.scan over the time-major input.
            n_steps = input_shape[1]
            input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
            shuffled_input = tf.transpose(input, (1, 0, 2))
            shuffled_input.set_shape((None, None, self.input_shape[-1]))
            hcs = tf.scan(
                self.step,
                elems=shuffled_input,
                initializer=tf.concat(axis=1, values=[h0s, c0s]),
            )
            shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
            # Only the hidden half is returned (the unused cell slice that
            # used to be computed here has been removed).
            shuffled_hs = shuffled_hcs[:, :, :self.num_units]
            return shuffled_hs

    def get_output_shape_for(self, input_shape):
        n_batch, n_steps = input_shape[:2]
        return n_batch, n_steps, self.num_units

    def get_step_layer(self, l_in, l_prev_state, name=None):
        return LSTMStepLayer(incomings=[l_in, l_prev_state], recurrent_layer=self, name=name)
def get_all_layers(layer, treat_as_input=None):
    """
    :type layer: Layer | list[Layer]
    :rtype: list[Layer]

    Topologically order all layers the given layer(s) depend on: every
    layer appears after all of its inputs.
    """
    # Iterative depth-first search with an explicit LIFO stack (a deque) so
    # arbitrarily deep networks cannot hit the recursion limit. A layer is
    # emitted only on its second visit, i.e. after its inputs were emitted
    # (or when a cycle was detected).
    try:
        pending = deque(layer)
    except TypeError:
        pending = deque([layer])
    visited = set()
    emitted = set()
    ordered = []
    # Layers listed in treat_as_input count as already expanded, so their
    # own inputs are never traversed.
    if treat_as_input is not None:
        visited.update(treat_as_input)
    while pending:
        current = pending[0]  # peek without popping
        if current is None:
            # An input_layer was set to `None`; drop it silently.
            pending.popleft()
        elif current not in visited:
            # First visit: push the inputs so they get processed first.
            visited.add(current)
            if hasattr(current, 'input_layers'):
                pending.extendleft(reversed(current.input_layers))
            elif hasattr(current, 'input_layer'):
                pending.appendleft(current.input_layer)
        else:
            # Second visit: all inputs done (or cycle) — emit the layer.
            pending.popleft()
            if current not in emitted:
                ordered.append(current)
                emitted.add(current)
    return ordered
class NonlinearityLayer(Layer):
    """Applies an elementwise nonlinearity to its input.

    Passing ``nonlinearity=None`` yields the identity function.
    """

    def __init__(self, incoming, nonlinearity=tf.nn.relu, **kwargs):
        super(NonlinearityLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = tf.identity
        else:
            self.nonlinearity = nonlinearity

    def get_output_for(self, input, **kwargs):
        return self.nonlinearity(input)

    def get_output_shape_for(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class BatchNormLayer(Layer):
    """Batch normalization over all axes but the last (feature) axis.

    Keeps exponential moving averages of mean and variance for use at test
    time; `center`/`scale` control whether the learned beta/gamma offsets
    are applied.
    """
    def __init__(self, incoming, center=True, scale=False, epsilon=0.001, decay=0.9,
                 beta=tf.zeros_initializer(), gamma=tf.ones_initializer(), moving_mean=tf.zeros_initializer(),
                 moving_variance=tf.ones_initializer(), **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)
        self.center = center
        self.scale = scale
        self.epsilon = epsilon
        self.decay = decay
        input_shape = incoming.output_shape
        # Normalize over every axis except the trailing feature axis.
        axis = list(range(len(input_shape) - 1))
        params_shape = input_shape[-1:]
        if center:
            self.beta = self.add_param(beta, shape=params_shape, name='beta', trainable=True, regularizable=False)
        else:
            self.beta = None
        if scale:
            self.gamma = self.add_param(gamma, shape=params_shape, name='gamma', trainable=True, regularizable=True)
        else:
            self.gamma = None
        # Moving statistics are updated during training, used at test time.
        self.moving_mean = self.add_param(moving_mean, shape=params_shape, name='moving_mean', trainable=False,
                                          regularizable=False)
        self.moving_variance = self.add_param(moving_variance, shape=params_shape, name='moving_variance',
                                              trainable=False, regularizable=False)
        self.axis = axis
    def get_output_for(self, input, phase='train', **kwargs):
        """Normalize `input`; `phase` selects batch vs. moving statistics."""
        if phase == 'train':
            # Calculate the moments based on the individual batch.
            # NOTE(review): `shift=` is passed a variable here; it is a
            # numerical-stability hint for the moments computation — confirm
            # against the tf.nn.moments docs for the TF version in use.
            mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
            # Update the moving_mean and moving_variance moments.
            update_moving_mean = moving_averages.assign_moving_average(
                self.moving_mean, mean, self.decay)
            update_moving_variance = moving_averages.assign_moving_average(
                self.moving_variance, variance, self.decay)
            # Make sure the updates are computed here.
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                output = tf.nn.batch_normalization(
                    input, mean, variance, self.beta, self.gamma, self.epsilon)
        else:
            output = tf.nn.batch_normalization(
                input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
        output.set_shape(self.input_shape)
        return output
    def get_output_shape_for(self, input_shape):
        # Elementwise normalization: shape unchanged.
        return input_shape
def batch_norm(layer, **kwargs):
    """Wrap `layer` in batch normalization, Lasagne-style.

    Any nonlinearity on the layer is moved to run after the normalization,
    and the layer's bias (if any) is removed, since BN's beta offset makes
    it redundant.
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    scale = True
    if nonlinearity is not None:
        layer.nonlinearity = tf.identity
        # ReLU is invariant to positive rescaling, so gamma is unnecessary.
        if nonlinearity is tf.nn.relu:
            scale = False
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    explicit_name = kwargs.pop('name', None)
    derived_name = getattr(layer, 'name', None) and layer.name + '_bn'
    bn_name = explicit_name or derived_name
    layer = BatchNormLayer(layer, name=bn_name, scale=scale, **kwargs)
    if nonlinearity is not None:
        nonlin_name = bn_name and bn_name + '_nonlin'
        layer = NonlinearityLayer(layer, nonlinearity=nonlinearity, name=nonlin_name)
    return layer
class ElemwiseSumLayer(MergeLayer):
    """Sums several same-shaped incoming layers elementwise."""

    def __init__(self, incomings, **kwargs):
        super(ElemwiseSumLayer, self).__init__(incomings, **kwargs)

    def get_output_for(self, inputs, **kwargs):
        # Fold the inputs pairwise into a single sum tensor.
        return functools.reduce(tf.add, inputs)

    def get_output_shape_for(self, input_shapes):
        # All incoming shapes must agree exactly.
        assert len(set(input_shapes)) == 1
        return input_shapes[0]
def get_output(layer_or_layers, inputs=None, **kwargs):
    """Symbolically propagate input expressions through the layer graph.

    `inputs` may be None (use the InputLayers' own variables), a single
    expression (only valid for single-input networks), or a dict mapping
    layers to expressions (those layers are then treated as graph inputs).
    Returns the output expression(s) for `layer_or_layers`, and warns about
    kwargs no visited layer accepted.
    """
    # track accepted kwargs used by get_output_for
    accepted_kwargs = {'deterministic'}
    # obtain topological ordering of all layers the output layer(s) depend on
    treat_as_input = list(inputs.keys()) if isinstance(inputs, dict) else []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # initialize layer-to-expression mapping from all input layers
    all_outputs = dict((layer, layer.input_var)
                       for layer in all_layers
                       if isinstance(layer, InputLayer) and
                       layer not in treat_as_input)
    # update layer-to-expression mapping from given input(s), if any
    if isinstance(inputs, dict):
        all_outputs.update((layer, tf.convert_to_tensor(expr))
                           for layer, expr in list(inputs.items()))
    elif inputs is not None:
        if len(all_outputs) > 1:
            raise ValueError("get_output() was called with a single input "
                             "expression on a network with multiple input "
                             "layers. Please call it with a dictionary of "
                             "input expressions instead.")
        for input_layer in all_outputs:
            all_outputs[input_layer] = tf.convert_to_tensor(inputs)
    # update layer-to-expression mapping by propagating the inputs
    for layer in all_layers:
        if layer not in all_outputs:
            try:
                if isinstance(layer, MergeLayer):
                    layer_inputs = [all_outputs[input_layer]
                                    for input_layer in layer.input_layers]
                else:
                    layer_inputs = all_outputs[layer.input_layer]
            except KeyError:
                # one of the input_layer attributes must have been `None`
                raise ValueError("get_output() was called without giving an "
                                 "input expression for the free-floating "
                                 "layer %r. Please call it with a dictionary "
                                 "mapping this layer to an input expression."
                                 % layer)
            all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
            # Collect the kwargs this layer's get_output_for accepts, so we
            # can warn about any caller-supplied kwargs nobody used.
            try:
                names, _, _, defaults = getargspec(layer.get_output_for)
            except TypeError:
                # If introspection is not possible, skip it
                pass
            else:
                if defaults is not None:
                    accepted_kwargs |= set(names[-len(defaults):])
            accepted_kwargs |= set(layer.get_output_kwargs)
    unused_kwargs = set(kwargs.keys()) - accepted_kwargs
    if unused_kwargs:
        # Help the caller spot typos by suggesting close matches.
        suggestions = []
        for kwarg in unused_kwargs:
            suggestion = get_close_matches(kwarg, accepted_kwargs)
            if suggestion:
                suggestions.append('%s (perhaps you meant %s)'
                                   % (kwarg, suggestion[0]))
            else:
                suggestions.append(kwarg)
        warn("get_output() was called with unused kwargs:\n\t%s"
             % "\n\t".join(suggestions))
    # return the output(s) of the requested layer(s) only
    try:
        return [all_outputs[layer] for layer in layer_or_layers]
    except TypeError:
        return all_outputs[layer_or_layers]
def unique(l):
    """Filters duplicates of iterable.
    Create a new list from l with duplicate entries removed,
    while preserving the original order.
    Parameters
    ----------
    l : iterable
        Input iterable to filter of duplicates.
    Returns
    -------
    list
        A list of elements of `l` without duplicates and in the same order.
    """
    seen = set()
    deduped = []
    for item in l:
        if item in seen:
            continue
        seen.add(item)
        deduped.append(item)
    return deduped
def get_all_params(layer, **tags):
    """
    :type layer: Layer|list[Layer]

    Collect the (de-duplicated, order-preserving) parameters of every layer
    that `layer` depends on, filtered by the given tags.
    """
    all_layers = get_all_layers(layer)
    per_layer_params = (each.get_params(**tags) for each in all_layers)
    return unique(chain.from_iterable(per_layer_params))
| 76,922 | 40.557536 | 120 | py |
rllab | rllab-master/sandbox/rocky/tf/core/layers_powered.py | from sandbox.rocky.tf.core.parameterized import Parameterized
import sandbox.rocky.tf.core.layers as L
import itertools
class LayersPowered(Parameterized):
    """Parameterized object whose parameters come from a layer graph."""

    def __init__(self, output_layers, input_layers=None):
        self._output_layers = output_layers
        self._input_layers = input_layers
        Parameterized.__init__(self)

    def get_params_internal(self, **tags):
        # Walk the graph behind the outputs (stopping at the declared
        # inputs) and gather each layer's parameters.
        layers = L.get_all_layers(self._output_layers, treat_as_input=self._input_layers)
        per_layer = (layer.get_params(**tags) for layer in layers)
        return L.unique(itertools.chain.from_iterable(per_layer))
| 592 | 31.944444 | 89 | py |
rllab | rllab-master/sandbox/rocky/tf/core/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/core/parameterized.py | from contextlib import contextmanager
from rllab.core.serializable import Serializable
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
import tensorflow as tf
load_params = True
@contextmanager
def suppress_params_loading():
    """Temporarily disable parameter loading (module-global `load_params`).

    Fix: restore the flag in a ``finally`` clause so an exception raised
    inside the ``with`` body no longer leaves ``load_params`` stuck at
    False for the rest of the process.
    """
    global load_params
    load_params = False
    try:
        yield
    finally:
        load_params = True
class Parameterized(object):
    """Base class for objects exposing TF parameters with tag-based caching.

    Subclasses implement `get_params_internal`; this class caches the
    resulting parameter lists, dtypes, shapes, and assignment ops keyed by
    the (sorted) tag combination.
    """
    def __init__(self):
        # All caches are keyed by a sorted tuple of the tag items.
        self._cached_params = {}
        self._cached_param_dtypes = {}
        self._cached_param_shapes = {}
        # Assign ops/placeholders are cached per-variable so repeated
        # set_param_values calls do not grow the TF graph.
        self._cached_assign_ops = {}
        self._cached_assign_placeholders = {}
    def get_params_internal(self, **tags):
        """
        Internal method to be implemented which does not perform caching
        """
        raise NotImplementedError
    def get_params(self, **tags):
        """
        Get the list of parameters, filtered by the provided tags.
        Some common tags include 'regularizable' and 'trainable'
        """
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_params:
            self._cached_params[tag_tuple] = self.get_params_internal(**tags)
        return self._cached_params[tag_tuple]
    def get_param_dtypes(self, **tags):
        """Return (and cache) the numpy dtypes of the filtered parameters."""
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_param_dtypes:
            params = self.get_params(**tags)
            param_values = tf.get_default_session().run(params)
            self._cached_param_dtypes[tag_tuple] = [val.dtype for val in param_values]
        return self._cached_param_dtypes[tag_tuple]
    def get_param_shapes(self, **tags):
        """Return (and cache) the shapes of the filtered parameters."""
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_param_shapes:
            params = self.get_params(**tags)
            param_values = tf.get_default_session().run(params)
            self._cached_param_shapes[tag_tuple] = [val.shape for val in param_values]
        return self._cached_param_shapes[tag_tuple]
    def get_param_values(self, **tags):
        """Return all filtered parameter values as one flat numpy vector."""
        params = self.get_params(**tags)
        param_values = tf.get_default_session().run(params)
        return flatten_tensors(param_values)
    def set_param_values(self, flattened_params, **tags):
        """Assign parameter values from a flat vector (inverse of get_param_values)."""
        debug = tags.pop("debug", False)
        param_values = unflatten_tensors(
            flattened_params, self.get_param_shapes(**tags))
        ops = []
        feed_dict = dict()
        for param, dtype, value in zip(
                self.get_params(**tags),
                self.get_param_dtypes(**tags),
                param_values):
            if param not in self._cached_assign_ops:
                # Build the assign op once per variable and reuse it.
                assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
                assign_op = tf.assign(param, assign_placeholder)
                self._cached_assign_ops[param] = assign_op
                self._cached_assign_placeholders[param] = assign_placeholder
            ops.append(self._cached_assign_ops[param])
            feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
            if debug:
                print("setting value of %s" % param.name)
        tf.get_default_session().run(ops, feed_dict=feed_dict)
    def flat_to_params(self, flattened_params, **tags):
        """Split a flat vector into per-parameter arrays (no assignment)."""
        return unflatten_tensors(flattened_params, self.get_param_shapes(**tags))
    def __getstate__(self):
        # Serialize current parameter values alongside constructor args,
        # unless loading is suppressed (see suppress_params_loading).
        d = Serializable.__getstate__(self)
        global load_params
        if load_params:
            d["params"] = self.get_param_values()
        return d
    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            # Variables must be initialized before values can be assigned.
            tf.get_default_session().run(tf.variables_initializer(self.get_params()))
            self.set_param_values(d["params"])
class JointParameterized(Parameterized):
    """Aggregates the parameters of several Parameterized components."""

    def __init__(self, components):
        super(JointParameterized, self).__init__()
        self.components = components

    def get_params_internal(self, **tags):
        collected = []
        for comp in self.components:
            collected.extend(comp.get_params_internal(**tags))
        # only return unique parameters, in a deterministic order
        return sorted(set(collected), key=hash)
| 4,226 | 37.081081 | 98 | py |
rllab | rllab-master/sandbox/rocky/tf/envs/parallel_vec_env_executor.py |
import numpy as np
import pickle as pickle
from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc import logger
from rllab.sampler.stateful_pool import singleton_pool
import uuid
def worker_init_envs(G, alloc, scope, env):
    """Create one deep-copied env per allocated global index on this worker."""
    logger.log("initializing environment on worker %d" % G.worker_id)
    if not hasattr(G, 'parallel_vec_envs'):
        G.parallel_vec_envs = dict()
        G.parallel_vec_env_template = dict()
    # Deep-copy via pickle so each slot gets an independent env instance.
    clones = []
    for idx in alloc:
        clones.append((idx, pickle.loads(pickle.dumps(env))))
    G.parallel_vec_envs[scope] = clones
    G.parallel_vec_env_template[scope] = env
# For these two methods below, we pack the data into batch numpy arrays whenever possible, to reduce communication cost
def worker_run_reset(G, flags, scope):
    """Reset the envs owned by this worker whose global index is flagged.

    Returns (ids, obs_arr): `ids` lists every env index on this worker, and
    `obs_arr` holds the flattened reset observation for flagged envs (rows
    for non-flagged envs stay zero).
    """
    if not hasattr(G, 'parallel_vec_envs'):
        logger.log("on worker %d" % G.worker_id)
        import traceback
        for line in traceback.format_stack():
            logger.log(line)
        # log the stacktrace at least
        logger.log("oops")
        for k, v in G.__dict__.items():
            logger.log(str(k) + " : " + str(v))
    assert hasattr(G, 'parallel_vec_envs')
    assert scope in G.parallel_vec_envs
    local_envs = G.parallel_vec_envs[scope]
    env_template = G.parallel_vec_env_template[scope]
    obs_dim = env_template.observation_space.flat_dim
    obs_arr = np.zeros((len(local_envs), obs_dim))
    ids = []
    fresh_obs = []
    fresh_rows = []
    for row, (idx, env) in enumerate(local_envs):
        if flags[idx]:
            fresh_obs.append(env.reset())
            fresh_rows.append(row)
        ids.append(idx)
    # Batch-flatten all fresh observations in one call.
    if len(fresh_rows) > 0:
        obs_arr[fresh_rows] = env_template.observation_space.flatten_n(fresh_obs)
    return ids, obs_arr
def worker_run_step(G, action_n, scope):
    """Step every env owned by this worker with its action; batch the results.

    Returns None when this worker owns no envs, otherwise
    (ids, flat_obs, rewards, dones, stacked env_infos).
    """
    assert hasattr(G, 'parallel_vec_envs')
    assert scope in G.parallel_vec_envs
    env_template = G.parallel_vec_env_template[scope]
    ids = []
    step_results = []
    for idx, env in G.parallel_vec_envs[scope]:
        ids.append(idx)
        step_results.append(tuple(env.step(action_n[idx])))
    if len(step_results) == 0:
        return None
    # Transpose the per-env tuples into per-field lists, then batch them.
    obs, rewards, dones, env_infos = [list(col) for col in zip(*step_results)]
    obs = env_template.observation_space.flatten_n(obs)
    rewards = np.asarray(rewards)
    dones = np.asarray(dones)
    env_infos = tensor_utils.stack_tensor_dict_list(env_infos)
    return ids, obs, rewards, dones, env_infos
def worker_collect_env_time(G):
    """Return the accumulated environment time stored on worker globals G."""
    return getattr(G, "env_time")
class ParallelVecEnvExecutor(object):
    """Vectorized env executor that spreads envs across pool workers.

    Each worker owns a contiguous slice of env indices (under a unique
    `scope` key in its globals); step/reset fan out via singleton_pool and
    the results are re-assembled in index order. Episodes are capped at
    `max_path_length` steps, after which the env is auto-reset.
    """
    def __init__(self, env, n, max_path_length, scope=None):
        if scope is None:
            # initialize random scope
            scope = str(uuid.uuid4())
        # Allocate env indices to workers in contiguous chunks.
        envs_per_worker = int(np.ceil(n * 1.0 / singleton_pool.n_parallel))
        alloc_env_ids = []
        rest_alloc = n
        start_id = 0
        for _ in range(singleton_pool.n_parallel):
            n_allocs = min(envs_per_worker, rest_alloc)
            alloc_env_ids.append(list(range(start_id, start_id + n_allocs)))
            start_id += n_allocs
            rest_alloc = max(0, rest_alloc - envs_per_worker)
        singleton_pool.run_each(worker_init_envs, [(alloc, scope, env) for alloc in alloc_env_ids])
        self._alloc_env_ids = alloc_env_ids
        self._action_space = env.action_space
        self._observation_space = env.observation_space
        self._num_envs = n
        self.scope = scope
        # Per-env step counters, used to enforce max_path_length.
        self.ts = np.zeros(n, dtype='int')
        self.max_path_length = max_path_length
    def step(self, action_n):
        """Step all envs with `action_n`; auto-reset finished episodes."""
        results = singleton_pool.run_each(
            worker_run_step,
            [(action_n, self.scope) for _ in self._alloc_env_ids],
        )
        # Workers that own no envs return None.
        results = [x for x in results if x is not None]
        ids, obs, rewards, dones, env_infos = list(zip(*results))
        ids = np.concatenate(ids)
        obs = self.observation_space.unflatten_n(np.concatenate(obs))
        rewards = np.concatenate(rewards)
        dones = np.concatenate(dones)
        env_infos = tensor_utils.split_tensor_dict_list(tensor_utils.concat_tensor_dict_list(env_infos))
        if env_infos is None:
            env_infos = [dict() for _ in range(self.num_envs)]
        # Re-assemble results in global env-index order.
        items = list(zip(ids, obs, rewards, dones, env_infos))
        items = sorted(items, key=lambda x: x[0])
        ids, obs, rewards, dones, env_infos = list(zip(*items))
        obs = list(obs)
        rewards = np.asarray(rewards)
        dones = np.asarray(dones)
        self.ts += 1
        # Force episode termination at the horizon.
        dones[self.ts >= self.max_path_length] = True
        reset_obs = self._run_reset(dones)
        for (i, done) in enumerate(dones):
            if done:
                obs[i] = reset_obs[i]
                self.ts[i] = 0
        return obs, rewards, dones, tensor_utils.stack_tensor_dict_list(list(env_infos))
    def _run_reset(self, dones):
        """Reset the envs whose `dones` flag is set; return obs list (None elsewhere)."""
        dones = np.asarray(dones)
        results = singleton_pool.run_each(
            worker_run_reset,
            [(dones, self.scope) for _ in self._alloc_env_ids],
        )
        ids, flat_obs = list(map(np.concatenate, list(zip(*results))))
        # Sort the flattened observations back into global index order.
        zipped = list(zip(ids, flat_obs))
        sorted_obs = np.asarray([x[1] for x in sorted(zipped, key=lambda x: x[0])])
        done_ids, = np.where(dones)
        done_flat_obs = sorted_obs[done_ids]
        done_unflat_obs = self.observation_space.unflatten_n(done_flat_obs)
        all_obs = [None] * self.num_envs
        done_cursor = 0
        for idx, done in enumerate(dones):
            if done:
                all_obs[idx] = done_unflat_obs[done_cursor]
                done_cursor += 1
        return all_obs
    def reset(self):
        """Reset every env and return the list of initial observations."""
        dones = np.asarray([True] * self.num_envs)
        return self._run_reset(dones)
    @property
    def num_envs(self):
        return self._num_envs
    @property
    def action_space(self):
        return self._action_space
    @property
    def observation_space(self):
        return self._observation_space
    def terminate(self):
        # Worker-side envs are cleaned up with the pool; nothing to do here.
        pass
| 6,057 | 33.225989 | 119 | py |
rllab | rllab-master/sandbox/rocky/tf/envs/base.py | from rllab.envs.proxy_env import ProxyEnv
from rllab.envs.base import EnvSpec
from rllab.spaces.box import Box as TheanoBox
from rllab.spaces.discrete import Discrete as TheanoDiscrete
from rllab.spaces.product import Product as TheanoProduct
from sandbox.rocky.tf.spaces.discrete import Discrete
from sandbox.rocky.tf.spaces.box import Box
from sandbox.rocky.tf.spaces.product import Product
from cached_property import cached_property
def to_tf_space(space):
    """Convert a Theano-backed rllab space into its TF-backed counterpart.

    Product spaces are converted recursively; unknown space types raise
    NotImplementedError.
    """
    if isinstance(space, TheanoBox):
        return Box(low=space.low, high=space.high)
    if isinstance(space, TheanoDiscrete):
        return Discrete(space.n)
    if isinstance(space, TheanoProduct):
        converted = [to_tf_space(component) for component in space.components]
        return Product(converted)
    raise NotImplementedError
class WrappedCls(object):
    """Picklable factory composing an env class with a wrapper class.

    Calling the instance builds env_cls(*args, **{extra_kwargs, **kwargs})
    and passes the result to cls. Used instead of a lambda so the
    composition serializes cleanly.
    """

    def __init__(self, cls, env_cls, extra_kwargs):
        self.cls = cls
        self.env_cls = env_cls
        self.extra_kwargs = extra_kwargs

    def __call__(self, *args, **kwargs):
        # Call-time kwargs override the stored extras.
        merged = dict(self.extra_kwargs, **kwargs)
        inner = self.env_cls(*args, **merged)
        return self.cls(inner)
class TfEnv(ProxyEnv):
    """Proxy env exposing TF-backed spaces for a Theano-backed wrapped env."""
    @cached_property
    def observation_space(self):
        # Converted once and cached; the wrapped space is assumed static.
        return to_tf_space(self.wrapped_env.observation_space)
    @cached_property
    def action_space(self):
        return to_tf_space(self.wrapped_env.action_space)
    @cached_property
    def spec(self):
        return EnvSpec(
            observation_space=self.observation_space,
            action_space=self.action_space,
        )
    @property
    def vectorized(self):
        # True only if the wrapped env advertises vectorized execution.
        return getattr(self.wrapped_env, "vectorized", False)
    def vec_env_executor(self, n_envs, max_path_length):
        """Build a vectorized executor and wrap it in VecTfEnv."""
        return VecTfEnv(self.wrapped_env.vec_env_executor(n_envs=n_envs, max_path_length=max_path_length))
    @classmethod
    def wrap(cls, env_cls, **extra_kwargs):
        # Use a class wrapper rather than a lambda method for smoother serialization
        return WrappedCls(cls, env_cls, extra_kwargs)
class VecTfEnv(object):
    """Thin pass-through wrapper around a vectorized env executor."""

    def __init__(self, vec_env):
        self.vec_env = vec_env

    def reset(self):
        """Reset all underlying envs."""
        return self.vec_env.reset()

    @property
    def num_envs(self):
        """Number of envs run in lockstep."""
        return self.vec_env.num_envs

    def step(self, action_n):
        """Step every env with its corresponding action."""
        return self.vec_env.step(action_n)

    def terminate(self):
        """Shut the underlying executor down."""
        self.vec_env.terminate()
| 2,330 | 28.506329 | 106 | py |
rllab | rllab-master/sandbox/rocky/tf/envs/vec_env_executor.py |
import numpy as np
import pickle as pickle
from sandbox.rocky.tf.misc import tensor_utils
class VecEnvExecutor(object):
    """Serial executor stepping a list of envs in lockstep.

    Episodes are capped at `max_path_length` steps (if given), after which
    the env is auto-reset and its observation replaced by the fresh one.
    """

    def __init__(self, envs, max_path_length):
        self.envs = envs
        self._action_space = envs[0].action_space
        self._observation_space = envs[0].observation_space
        # Per-env step counters for horizon enforcement.
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.max_path_length = max_path_length

    def step(self, action_n):
        """Step every env with its action; auto-reset finished episodes."""
        results = [env.step(a) for a, env in zip(action_n, self.envs)]
        # Transpose per-env tuples into per-field lists.
        obs, rewards, dones, env_infos = [list(col) for col in zip(*results)]
        dones = np.asarray(dones)
        rewards = np.asarray(rewards)
        self.ts += 1
        if self.max_path_length is not None:
            # Force termination at the horizon.
            dones[self.ts >= self.max_path_length] = True
        for i, done in enumerate(dones):
            if done:
                obs[i] = self.envs[i].reset()
                self.ts[i] = 0
        return obs, rewards, dones, tensor_utils.stack_tensor_dict_list(env_infos)

    def reset(self):
        """Reset every env and zero the step counters."""
        fresh = [env.reset() for env in self.envs]
        self.ts[:] = 0
        return fresh

    @property
    def num_envs(self):
        return len(self.envs)

    @property
    def action_space(self):
        return self._action_space

    @property
    def observation_space(self):
        return self._observation_space

    def terminate(self):
        # Envs are plain in-process objects; nothing to shut down.
        pass
| 1,412 | 27.836735 | 82 | py |
rllab | rllab-master/sandbox/rocky/tf/envs/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/distributions/recurrent_diagonal_gaussian.py |
from sandbox.rocky.tf.distributions.diagonal_gaussian import DiagonalGaussian

# A diagonal Gaussian factorizes over time steps, so the recurrent variant
# is the same distribution; expose it under the recurrent name for symmetry
# with RecurrentCategorical.
RecurrentDiagonalGaussian = DiagonalGaussian
| 127 | 17.285714 | 77 | py |
rllab | rllab-master/sandbox/rocky/tf/distributions/base.py |
class Distribution(object):
    """Abstract interface for (possibly symbolic) probability distributions."""

    @property
    def dim(self):
        """Dimensionality of the distribution's parameterization."""
        raise NotImplementedError

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """
        Compute the symbolic KL divergence of two distributions
        """
        raise NotImplementedError

    def kl(self, old_dist_info, new_dist_info):
        """
        Compute the KL divergence of two distributions
        """
        raise NotImplementedError

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic ratio p_new(x) / p_old(x)."""
        raise NotImplementedError

    def entropy(self, dist_info):
        """Entropy given concrete distribution parameters."""
        raise NotImplementedError

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log-likelihood of x under the distribution."""
        raise NotImplementedError

    def log_likelihood(self, xs, dist_info):
        """Numeric log-likelihood of samples xs."""
        raise NotImplementedError

    @property
    def dist_info_specs(self):
        """List of (name, shape) pairs describing the distribution parameters."""
        raise NotImplementedError

    @property
    def dist_info_keys(self):
        # Derived from dist_info_specs, which subclasses must provide.
        return [key for key, _shape in self.dist_info_specs]
| 982 | 22.97561 | 82 | py |
rllab | rllab-master/sandbox/rocky/tf/distributions/categorical.py | import numpy as np
from .base import Distribution
import tensorflow as tf
from sandbox.rocky.tf.misc import tensor_utils
TINY = 1e-8
def from_onehot(x_var):
    """Convert a batch of one-hot rows into integer class indices.

    Rows with no nonzero entry map to index 0 (the array default).
    """
    indices = np.zeros((len(x_var),), 'int32')
    rows, cols = np.nonzero(x_var)
    indices[rows] = cols
    return indices
class Categorical(Distribution):
    """Categorical distribution over `dim` classes, batch layout N * A.

    dist_info holds a single key "prob" with per-class probabilities; TINY
    is added inside logs/ratios for numerical stability.
    """
    def __init__(self, dim):
        self._dim = dim
        weights_var = tf.placeholder(
            dtype=tf.float32,
            shape=(None, dim),
            name="weights"
        )
        # Compiled sampler: draws one class index per row of weights.
        self._f_sample = tensor_utils.compile_function(
            inputs=[weights_var],
            outputs=tf.multinomial(tf.log(weights_var + 1e-8), num_samples=1)[:, 0],
        )
    @property
    def dim(self):
        return self._dim
    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """
        Compute the symbolic KL divergence of two categorical distributions
        """
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        ndims = old_prob_var.get_shape().ndims
        # Assume layout is N * A
        return tf.reduce_sum(
            old_prob_var * (tf.log(old_prob_var + TINY) - tf.log(new_prob_var + TINY)),
            axis=ndims - 1
        )
    def kl(self, old_dist_info, new_dist_info):
        """
        Compute the KL divergence of two categorical distributions
        """
        old_prob = old_dist_info["prob"]
        new_prob = new_dist_info["prob"]
        return np.sum(
            old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
            axis=-1
        )
    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x)/p_old(x); x_var is one-hot along the last axis."""
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        ndims = old_prob_var.get_shape().ndims
        x_var = tf.cast(x_var, tf.float32)
        # Assume layout is N * A
        return (tf.reduce_sum(new_prob_var * x_var, ndims - 1) + TINY) / \
               (tf.reduce_sum(old_prob_var * x_var, ndims - 1) + TINY)
    def entropy_sym(self, dist_info_vars):
        probs = dist_info_vars["prob"]
        return -tf.reduce_sum(probs * tf.log(probs + TINY), axis=1)
    def cross_entropy_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic cross entropy H(old, new)."""
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        ndims = old_prob_var.get_shape().ndims
        # Assume layout is N * A
        return tf.reduce_sum(
            old_prob_var * (- tf.log(new_prob_var + TINY)),
            axis=ndims - 1
        )
    def entropy(self, info):
        probs = info["prob"]
        return -np.sum(probs * np.log(probs + TINY), axis=1)
    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log p(x); x_var is one-hot along the last axis."""
        probs = dist_info_vars["prob"]
        ndims = probs.get_shape().ndims
        return tf.log(tf.reduce_sum(probs * tf.cast(x_var, tf.float32), ndims - 1) + TINY)
    def log_likelihood(self, xs, dist_info):
        """Numeric log p(x); xs is one-hot along the last axis."""
        probs = dist_info["prob"]
        # Assume layout is N * A
        return np.log(np.sum(probs * xs, axis=-1) + TINY)
    @property
    def dist_info_specs(self):
        return [("prob", (self.dim,))]
    def sample(self, dist_info):
        """Draw one class index per row using the compiled TF sampler."""
        return self._f_sample(dist_info["prob"])
    def sample_sym(self, dist_info):
        """Symbolic sampling; returns one-hot rows."""
        probs = dist_info["prob"]
        samples = tf.multinomial(tf.log(probs + 1e-8), num_samples=1)[:, 0]
        return tf.nn.embedding_lookup(np.eye(self.dim, dtype=np.float32), samples)
| 3,514 | 32.47619 | 90 | py |
rllab | rllab-master/sandbox/rocky/tf/distributions/recurrent_categorical.py | import tensorflow as tf
import numpy as np
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.distributions.base import Distribution
TINY = 1e-8
class RecurrentCategorical(Distribution):
    """Categorical distribution over sequences, batch layout N * T * A.

    Per-timestep computations delegate to a flat Categorical by collapsing
    the batch and time axes into one.
    """

    def __init__(self, dim):
        self._cat = Categorical(dim)
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """
        Compute the symbolic KL divergence of two categorical distributions
        """
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        # Assume layout is N * T * A
        return tf.reduce_sum(
            old_prob_var * (tf.log(old_prob_var + TINY) - tf.log(new_prob_var + TINY)),
            axis=2
        )

    def kl(self, old_dist_info, new_dist_info):
        """
        Compute the KL divergence of two categorical distributions
        """
        old_prob = old_dist_info["prob"]
        new_prob = new_dist_info["prob"]
        return np.sum(
            old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
            axis=2
        )

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        # Assume layout is N * T * A; flatten to (N*T, A) for the flat dist.
        a_dim = tf.shape(x_var)[2]
        flat_ratios = self._cat.likelihood_ratio_sym(
            tf.reshape(x_var, tf.stack([-1, a_dim])),
            dict(prob=tf.reshape(old_prob_var, tf.stack([-1, a_dim]))),
            dict(prob=tf.reshape(new_prob_var, tf.stack([-1, a_dim])))
        )
        return tf.reshape(flat_ratios, tf.shape(old_prob_var)[:2])

    def entropy(self, dist_info):
        probs = dist_info["prob"]
        return -np.sum(probs * np.log(probs + TINY), axis=2)

    def entropy_sym(self, dist_info_vars):
        probs = dist_info_vars["prob"]
        return -tf.reduce_sum(probs * tf.log(probs + TINY), 2)

    def log_likelihood_sym(self, xs, dist_info_vars):
        probs = dist_info_vars["prob"]
        # Assume layout is N * T * A; flatten to (N*T, A) for the flat dist.
        a_dim = tf.shape(probs)[2]
        flat_logli = self._cat.log_likelihood_sym(
            tf.reshape(xs, tf.stack([-1, a_dim])),
            dict(prob=tf.reshape(probs, tf.stack((-1, a_dim))))
        )
        return tf.reshape(flat_logli, tf.shape(probs)[:2])

    def log_likelihood(self, xs, dist_info):
        probs = dist_info["prob"]
        # Assume layout is N * T * A.
        # Fix: this is the non-symbolic path operating on numpy arrays, so
        # use numpy shapes/reshapes and the flat Categorical's numpy
        # log_likelihood. The previous code mixed tf.shape and
        # log_likelihood_sym with ndarray .reshape, which cannot work on
        # numpy inputs.
        a_dim = probs.shape[-1]
        flat_logli = self._cat.log_likelihood(
            xs.reshape((-1, a_dim)),
            dict(prob=probs.reshape((-1, a_dim)))
        )
        return flat_logli.reshape(probs.shape[:2])

    @property
    def dist_info_specs(self):
        return [("prob", (self.dim,))]
| 2,923 | 33 | 87 | py |
rllab | rllab-master/sandbox/rocky/tf/distributions/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/distributions/diagonal_gaussian.py |
import tensorflow as tf
import numpy as np
from sandbox.rocky.tf.distributions.base import Distribution
class DiagonalGaussian(Distribution):
    """Multivariate Gaussian with a diagonal covariance matrix, parametrized
    by per-dimension means ("mean") and log standard deviations ("log_std").
    """

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL(old || new) between two diagonal Gaussians.

        Per dimension: {(mu1 - mu2)^2 + s1^2 - s2^2} / (2 s2^2) + ln(s2 / s1),
        summed over the last axis.
        """
        mu_old = old_dist_info["mean"]
        logstd_old = old_dist_info["log_std"]
        mu_new = new_dist_info["mean"]
        logstd_new = new_dist_info["log_std"]
        std_old = np.exp(logstd_old)
        std_new = np.exp(logstd_new)
        num = np.square(mu_old - mu_new) + np.square(std_old) - np.square(std_new)
        # small epsilon keeps the division stable when new stds are tiny
        den = 2 * np.square(std_new) + 1e-8
        return np.sum(num / den + logstd_new - logstd_old, axis=-1)

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new); same formula as the numeric version."""
        mu_old = old_dist_info_vars["mean"]
        logstd_old = old_dist_info_vars["log_std"]
        mu_new = new_dist_info_vars["mean"]
        logstd_new = new_dist_info_vars["log_std"]
        std_old = tf.exp(logstd_old)
        std_new = tf.exp(logstd_new)
        num = tf.square(mu_old - mu_new) + tf.square(std_old) - tf.square(std_new)
        den = 2 * tf.square(std_new) + 1e-8
        return tf.reduce_sum(num / den + logstd_new - logstd_old, axis=-1)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic ratio p_new(x) / p_old(x), computed in log space."""
        return tf.exp(
            self.log_likelihood_sym(x_var, new_dist_info_vars)
            - self.log_likelihood_sym(x_var, old_dist_info_vars)
        )

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log-density of x_var under the diagonal Gaussian."""
        mu = dist_info_vars["mean"]
        logstd = dist_info_vars["log_std"]
        normalized = (x_var - mu) / tf.exp(logstd)
        return - tf.reduce_sum(logstd, axis=-1) - \
               0.5 * tf.reduce_sum(tf.square(normalized), axis=-1) - \
               0.5 * self.dim * np.log(2 * np.pi)

    def sample(self, dist_info):
        """Draw samples via the reparametrization noise * std + mean."""
        mu = dist_info["mean"]
        logstd = dist_info["log_std"]
        noise = np.random.normal(size=mu.shape)
        return noise * np.exp(logstd) + mu

    def log_likelihood(self, xs, dist_info):
        """Numeric log-density of xs under the diagonal Gaussian."""
        mu = dist_info["mean"]
        logstd = dist_info["log_std"]
        normalized = (xs - mu) / np.exp(logstd)
        return - np.sum(logstd, axis=-1) - \
               0.5 * np.sum(np.square(normalized), axis=-1) - \
               0.5 * self.dim * np.log(2 * np.pi)

    def entropy(self, dist_info):
        """Differential entropy: sum_i (log_std_i + log sqrt(2 pi e))."""
        logstd = dist_info["log_std"]
        return np.sum(logstd + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    @property
    def dist_info_specs(self):
        return [("mean", (self.dim,)), ("log_std", (self.dim,))]
| 3,627 | 36.020408 | 84 | py |
rllab | rllab-master/sandbox/rocky/tf/distributions/bernoulli.py |
from .base import Distribution
import tensorflow as tf
import numpy as np
TINY = 1e-8
class Bernoulli(Distribution):
    """Vector of independent Bernoulli variables, parametrized by the success
    probabilities stored under the key "p".
    """

    def __init__(self, dim):
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new), summed over the last axis."""
        p_old = old_dist_info_vars["p"]
        p_new = new_dist_info_vars["p"]
        per_dim = p_old * (tf.log(p_old + TINY) - tf.log(p_new + TINY)) + \
            (1 - p_old) * (tf.log(1 - p_old + TINY) - tf.log(1 - p_new + TINY))
        last_axis = per_dim.get_shape().ndims - 1
        return tf.reduce_sum(per_dim, axis=last_axis)

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL(old || new), summed over the last axis."""
        p_old = old_dist_info["p"]
        p_new = new_dist_info["p"]
        per_dim = p_old * (np.log(p_old + TINY) - np.log(p_new + TINY)) + \
            (1 - p_old) * (np.log(1 - p_old + TINY) - np.log(1 - p_new + TINY))
        return np.sum(per_dim, axis=-1)

    def sample(self, dist_info):
        """Draw 0/1 samples by thresholding uniform noise at p."""
        p = np.asarray(dist_info["p"])
        uniform = np.random.uniform(low=0., high=1., size=p.shape)
        return np.cast['int'](uniform < p)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic ratio p_new(x) / p_old(x), as a product over dimensions."""
        p_old = old_dist_info_vars["p"]
        p_new = new_dist_info_vars["p"]
        per_dim = x_var * p_new / (p_old + TINY) + \
            (1 - x_var) * (1 - p_new) / (1 - p_old + TINY)
        last_axis = p_old.get_shape().ndims - 1
        return tf.reduce_prod(per_dim, axis=last_axis)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log-likelihood of binary samples x_var."""
        p = dist_info_vars["p"]
        per_dim = x_var * tf.log(p + TINY) + (1 - x_var) * tf.log(1 - p + TINY)
        return tf.reduce_sum(per_dim, axis=p.get_shape().ndims - 1)

    def log_likelihood(self, xs, dist_info):
        """Numeric log-likelihood of binary samples xs."""
        p = dist_info["p"]
        per_dim = xs * np.log(p + TINY) + (1 - xs) * np.log(1 - p + TINY)
        return np.sum(per_dim, axis=-1)

    def entropy(self, dist_info):
        """Entropy of each vector of independent Bernoullis."""
        p = dist_info["p"]
        per_dim = - p * np.log(p + TINY) - (1 - p) * np.log(1 - p + TINY)
        return np.sum(per_dim, axis=-1)

    @property
    def dist_info_keys(self):
        return ["p"]
| 2,050 | 33.183333 | 110 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/base.py |
from sandbox.rocky.tf.core.parameterized import Parameterized
class Policy(Parameterized):
    """Abstract base class for policies tied to an environment spec."""

    def __init__(self, env_spec):
        Parameterized.__init__(self)
        self._env_spec = env_spec

    # --- methods every concrete policy must implement ---

    def get_action(self, observation):
        """Return a single (action, agent_info) pair for one observation."""
        raise NotImplementedError

    def get_actions(self, observations):
        """Return (actions, agent_infos) for a batch of observations."""
        raise NotImplementedError

    def reset(self, dones=None):
        """Reset internal state; no-op for stateless policies."""
        pass

    @property
    def vectorized(self):
        """Whether the policy supports batched operation.

        If True, get_actions() is implemented and reset() accepts a vector
        of done flags for multiple simultaneous states.
        """
        return False

    @property
    def observation_space(self):
        return self._env_spec.observation_space

    @property
    def action_space(self):
        return self._env_spec.action_space

    @property
    def env_spec(self):
        return self._env_spec

    @property
    def recurrent(self):
        """Whether the policy keeps recurrent state between steps."""
        return False

    def log_diagnostics(self, paths):
        """Log extra per-iteration information from the collected paths."""
        pass

    @property
    def state_info_keys(self):
        """Keys for the policy-state information recorded with each action."""
        return [key for key, _shape in self.state_info_specs]

    @property
    def state_info_specs(self):
        """(key, shape) pairs describing the recorded policy state."""
        return list()

    def terminate(self):
        """Clean up operation."""
        pass
class StochasticPolicy(Policy):
    """Policy whose actions are drawn from an explicit distribution."""

    @property
    def distribution(self):
        """The distribution object over actions.

        :rtype Distribution
        """
        raise NotImplementedError

    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic distribution information about the actions.

        :param obs_var: symbolic variable for observations
        :param state_info_vars: dictionary whose values describe the state of
            the policy at the time it received each observation
        """
        raise NotImplementedError

    def dist_info(self, obs, state_infos):
        """Numeric distribution information about the actions.

        :param obs: observation values
        :param state_infos: dictionary whose values describe the state of
            the policy at the time it received each observation
        """
        raise NotImplementedError
| 2,755 | 24.757009 | 117 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/uniform_control_policy.py | from sandbox.rocky.tf.policies.base import Policy
from rllab.core.serializable import Serializable
class UniformControlPolicy(Policy, Serializable):
    """Policy that ignores observations and samples actions uniformly at
    random from the action space. Useful as a baseline / exploration policy.
    """

    def __init__(self, env_spec):
        Serializable.quick_init(self, locals())
        super(UniformControlPolicy, self).__init__(env_spec=env_spec)

    @property
    def vectorized(self):
        return True

    def get_action(self, observation):
        # The observation is deliberately unused: every action is random.
        return self.action_space.sample(), dict()

    def get_actions(self, observations):
        return self.action_space.sample_n(len(observations)), dict()

    def get_params_internal(self, **tags):
        # No trainable parameters.
        return []
| 658 | 25.36 | 69 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/categorical_gru_policy.py | import numpy as np
import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import GRUNetwork, MLP
from sandbox.rocky.tf.distributions.recurrent_categorical import RecurrentCategorical
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.spaces.discrete import Discrete
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.core.serializable import Serializable
from rllab.misc import special
from rllab.misc.overrides import overrides
class CategoricalGRUPolicy(StochasticPolicy, LayersPowered, Serializable):
    """Recurrent (GRU) policy for discrete action spaces.

    Observations (optionally concatenated with the previous one-hot action
    when ``state_include_action`` is True) are passed through an optional
    feature network and a GRU that outputs a softmax over actions.
    """
    def __init__(
            self,
            name,
            env_spec,
            hidden_dim=32,
            feature_network=None,
            state_include_action=True,
            hidden_nonlinearity=tf.tanh,
            gru_layer_cls=L.GRULayer,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            assert isinstance(env_spec.action_space, Discrete)
            # quick_init captures locals() for serialization; keep parameter
            # names stable.
            Serializable.quick_init(self, locals())
            super(CategoricalGRUPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            # When requested, the previous one-hot action is appended to the
            # observation as recurrent input.
            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim
            l_input = L.InputLayer(
                shape=(None, None, input_dim),
                name="input"
            )
            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                # Re-fold the flat (N*T, F) features back into (N, T, F).
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([tf.shape(input)[0], tf.shape(input)[1], feature_dim])
                    ),
                    shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
                )
            prob_network = GRUNetwork(
                input_shape=(feature_dim,),
                input_layer=l_feature,
                output_dim=env_spec.action_space.n,
                hidden_dim=hidden_dim,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.softmax,
                gru_layer_cls=gru_layer_cls,
                name="prob_network"
            )
            self.prob_network = prob_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            # One-step rollout path: a single timestep of flattened input.
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # Compiled one-step function: (input, prev hidden) -> (probs, new hidden).
            self.f_step_prob = tensor_utils.compile_function(
                [
                    flat_input_var,
                    prob_network.step_prev_hidden_layer.input_var
                ],
                L.get_output([
                    prob_network.step_output_layer,
                    prob_network.step_hidden_layer
                ], {prob_network.step_input_layer: feature_var})
            )
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentCategorical(env_spec.action_space.n)
            out_layers = [prob_network.output_layer]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic action probabilities for whole (N, T, ...) trajectories."""
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))
        obs_var = tf.cast(obs_var, tf.float32)
        if self.state_include_action:
            prev_action_var = tf.cast(state_info_vars["prev_action"], tf.float32)
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var}
                )
            )
        else:
            flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
                )
            )
    @property
    def vectorized(self):
        return True
    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged as done."""
        if dones is None:
            dones = [True]
        dones = np.asarray(dones)
        # (Re-)allocate per-environment state when the batch size changes.
        if self.prev_actions is None or len(dones) != len(self.prev_actions):
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.
        # Hidden state restarts from the (learned) initial hidden parameter.
        self.prev_hiddens[dones] = self.prob_network.hid_init_param.eval() # get_value()
    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        actions, agent_infos = self.get_actions([observation])
        return actions[0], {k: v[0] for k, v in agent_infos.items()}
    @overrides
    def get_actions(self, observations):
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert self.prev_actions is not None
            all_input = np.concatenate([
                flat_obs,
                self.prev_actions
            ], axis=-1)
        else:
            all_input = flat_obs
        # Advance the recurrent state by one step for every environment.
        probs, hidden_vec = self.f_step_prob(all_input, self.prev_hiddens)
        actions = special.weighted_sample_n(probs, np.arange(self.action_space.n))
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        agent_info = dict(prob=probs)
        if self.state_include_action:
            agent_info["prev_action"] = np.copy(prev_actions)
        return actions, agent_info
    @property
    @overrides
    def recurrent(self):
        return True
    @property
    def distribution(self):
        return self.dist
    @property
    def state_info_specs(self):
        # The previous action is part of the recorded policy state only when
        # it is fed back as input.
        if self.state_include_action:
            return [
                ("prev_action", (self.action_dim,)),
            ]
        else:
            return []
| 7,649 | 36.317073 | 105 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/categorical_mlp_policy.py | from sandbox.rocky.tf.core.layers_powered import LayersPowered
import sandbox.rocky.tf.core.layers as L
from sandbox.rocky.tf.core.network import MLP
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.misc import ext
from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc.overrides import overrides
from sandbox.rocky.tf.spaces.discrete import Discrete
import tensorflow as tf
class CategoricalMLPPolicy(StochasticPolicy, LayersPowered, Serializable):
    """Feed-forward policy for discrete action spaces: an MLP maps flattened
    observations to a softmax over actions.
    """
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        # quick_init captures locals() for serialization; keep parameter
        # names stable.
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = MLP(
                    input_shape=(env_spec.observation_space.flat_dim,),
                    output_dim=env_spec.action_space.n,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=tf.nn.softmax,
                    name="prob_network",
                )
            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            # Compiled function: flat observations -> action probabilities.
            self._f_prob = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer)
            )
        self._dist = Categorical(env_spec.action_space.n)
        super(CategoricalMLPPolicy, self).__init__(env_spec)
        LayersPowered.__init__(self, [prob_network.output_layer])
    @property
    def vectorized(self):
        return True
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic action probabilities for the given observation variable."""
        return dict(prob=L.get_output(self._l_prob, {self._l_obs: tf.cast(obs_var, tf.float32)}))
    @overrides
    def dist_info(self, obs, state_infos=None):
        """Numeric action probabilities for the given observations."""
        return dict(prob=self._f_prob(obs))
    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        action = self.action_space.weighted_sample(prob)
        return action, dict(prob=prob)
    def get_actions(self, observations):
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return actions, dict(prob=probs)
    @property
    def distribution(self):
        return self._dist
| 3,395 | 36.733333 | 97 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/categorical_lstm_policy.py | import numpy as np
import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import LSTMNetwork, MLP
from sandbox.rocky.tf.distributions.recurrent_categorical import RecurrentCategorical
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.spaces.discrete import Discrete
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.core.serializable import Serializable
from rllab.misc import special
from rllab.misc.overrides import overrides
class CategoricalLSTMPolicy(StochasticPolicy, LayersPowered, Serializable):
    """Recurrent (LSTM) policy for discrete action spaces.

    Observations (optionally concatenated with the previous one-hot action
    when ``state_include_action`` is True) are passed through an optional
    feature network and an LSTM that outputs a softmax over actions.
    """
    def __init__(
            self,
            name,
            env_spec,
            hidden_dim=32,
            feature_network=None,
            prob_network=None,
            state_include_action=True,
            hidden_nonlinearity=tf.tanh,
            forget_bias=1.0,
            use_peepholes=False,
            lstm_layer_cls=L.LSTMLayer
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            assert isinstance(env_spec.action_space, Discrete)
            # quick_init captures locals() for serialization; keep parameter
            # names stable.
            Serializable.quick_init(self, locals())
            super(CategoricalLSTMPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            # When requested, the previous one-hot action is appended to the
            # observation as recurrent input.
            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim
            l_input = L.InputLayer(
                shape=(None, None, input_dim),
                name="input"
            )
            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                # Re-fold the flat (N*T, F) features back into (N, T, F).
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([tf.shape(input)[0], tf.shape(input)[1], feature_dim])
                    ),
                    shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
                )
            if prob_network is None:
                prob_network = LSTMNetwork(
                    input_shape=(feature_dim,),
                    input_layer=l_feature,
                    output_dim=env_spec.action_space.n,
                    hidden_dim=hidden_dim,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=tf.nn.softmax,
                    forget_bias=forget_bias,
                    use_peepholes=use_peepholes,
                    lstm_layer_cls=lstm_layer_cls,
                    name="prob_network"
                )
            self.prob_network = prob_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            # One-step rollout path: a single timestep of flattened input.
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # Compiled one-step function:
            # (input, prev hidden, prev cell) -> (probs, new hidden, new cell).
            self.f_step_prob = tensor_utils.compile_function(
                [
                    flat_input_var,
                    prob_network.step_prev_hidden_layer.input_var,
                    prob_network.step_prev_cell_layer.input_var
                ],
                L.get_output([
                    prob_network.step_output_layer,
                    prob_network.step_hidden_layer,
                    prob_network.step_cell_layer
                ], {prob_network.step_input_layer: feature_var})
            )
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            self.prev_actions = None
            self.prev_hiddens = None
            self.prev_cells = None
            self.dist = RecurrentCategorical(env_spec.action_space.n)
            out_layers = [prob_network.output_layer]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic action probabilities for whole (N, T, ...) trajectories."""
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))
        obs_var = tf.cast(obs_var, tf.float32)
        if self.state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            prev_action_var = tf.cast(prev_action_var, tf.float32)
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var}
                )
            )
        else:
            flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
                )
            )
    @property
    def vectorized(self):
        return True
    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged as done."""
        if dones is None:
            dones = [True]
        dones = np.asarray(dones)
        # (Re-)allocate per-environment state when the batch size changes.
        if self.prev_actions is None or len(dones) != len(self.prev_actions):
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
            self.prev_cells = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.
        # Hidden and cell state restart from the (learned) init parameters.
        self.prev_hiddens[dones] = self.prob_network.hid_init_param.eval()
        self.prev_cells[dones] = self.prob_network.cell_init_param.eval()
    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        actions, agent_infos = self.get_actions([observation])
        return actions[0], {k: v[0] for k, v in agent_infos.items()}
    @overrides
    def get_actions(self, observations):
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert self.prev_actions is not None
            all_input = np.concatenate([
                flat_obs,
                self.prev_actions
            ], axis=-1)
        else:
            all_input = flat_obs
        # Advance the recurrent state by one step for every environment.
        probs, hidden_vec, cell_vec = self.f_step_prob(all_input, self.prev_hiddens, self.prev_cells)
        actions = special.weighted_sample_n(probs, np.arange(self.action_space.n))
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        self.prev_cells = cell_vec
        agent_info = dict(prob=probs)
        if self.state_include_action:
            agent_info["prev_action"] = np.copy(prev_actions)
        return actions, agent_info
    @property
    @overrides
    def recurrent(self):
        return True
    @property
    def distribution(self):
        return self.dist
    @property
    def state_info_specs(self):
        # The previous action is part of the recorded policy state only when
        # it is fed back as input.
        if self.state_include_action:
            return [
                ("prev_action", (self.action_dim,)),
            ]
        else:
            return []
| 8,307 | 37.110092 | 105 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/gaussian_mlp_policy.py | import numpy as np
from sandbox.rocky.tf.core.layers_powered import LayersPowered
import sandbox.rocky.tf.core.layers as L
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.spaces.box import Box
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.policies.base import StochasticPolicy
from sandbox.rocky.tf.distributions.diagonal_gaussian import DiagonalGaussian
from rllab.misc.overrides import overrides
from rllab.misc import logger
from sandbox.rocky.tf.misc import tensor_utils
import tensorflow as tf
class GaussianMLPPolicy(StochasticPolicy, LayersPowered, Serializable):
    """MLP policy for continuous (Box) action spaces with a diagonal Gaussian
    action distribution; the std can be fixed, learned, or state-dependent.
    """
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            min_std=1e-6,
            std_hidden_nonlinearity=tf.nn.tanh,
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
            mean_network=None,
            std_network=None,
            std_parametrization='exp'
    ):
        """
        :param env_spec:
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std:
        :param std_share_network:
        :param std_hidden_sizes: list of sizes for the fully-connected layers for std
        :param min_std: whether to make sure that the std is at least some threshold value, to avoid numerical issues
        :param std_hidden_nonlinearity:
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There are a few options:
            - exp: the logarithm of the std will be stored, and applied a exponential transformation
            - softplus: the std will be computed as log(1+exp(x))
        :return:
        """
        # quick_init captures locals() for serialization; keep parameter
        # names stable.
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)
        with tf.variable_scope(name):
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            # create network
            if mean_network is None:
                mean_network = MLP(
                    name="mean_network",
                    input_shape=(obs_dim,),
                    output_dim=action_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                )
            self._mean_network = mean_network
            l_mean = mean_network.output_layer
            obs_var = mean_network.input_layer.input_var
            if std_network is not None:
                l_std_param = std_network.output_layer
            else:
                if adaptive_std:
                    # State-dependent std: a second MLP sharing the input layer.
                    std_network = MLP(
                        name="std_network",
                        input_shape=(obs_dim,),
                        input_layer=mean_network.input_layer,
                        output_dim=action_dim,
                        hidden_sizes=std_hidden_sizes,
                        hidden_nonlinearity=std_hidden_nonlinearity,
                        output_nonlinearity=None,
                    )
                    l_std_param = std_network.output_layer
                else:
                    # Invert the chosen parametrization so the initial param
                    # yields std == init_std.
                    if std_parametrization == 'exp':
                        init_std_param = np.log(init_std)
                    elif std_parametrization == 'softplus':
                        init_std_param = np.log(np.exp(init_std) - 1)
                    else:
                        raise NotImplementedError
                    l_std_param = L.ParamLayer(
                        mean_network.input_layer,
                        num_units=action_dim,
                        param=tf.constant_initializer(init_std_param),
                        name="output_std_param",
                        trainable=learn_std,
                    )
            self.std_parametrization = std_parametrization
            # Lower bound the std *in parameter space* for numerical stability.
            if std_parametrization == 'exp':
                min_std_param = np.log(min_std)
            elif std_parametrization == 'softplus':
                min_std_param = np.log(np.exp(min_std) - 1)
            else:
                raise NotImplementedError
            self.min_std_param = min_std_param
            # mean_var, log_std_var = L.get_output([l_mean, l_std_param])
            #
            # if self.min_std_param is not None:
            #     log_std_var = tf.maximum(log_std_var, np.log(min_std))
            #
            # self._mean_var, self._log_std_var = mean_var, log_std_var
            self._l_mean = l_mean
            self._l_std_param = l_std_param
            self._dist = DiagonalGaussian(action_dim)
            LayersPowered.__init__(self, [l_mean, l_std_param])
            super(GaussianMLPPolicy, self).__init__(env_spec)
            dist_info_sym = self.dist_info_sym(mean_network.input_layer.input_var, dict())
            mean_var = dist_info_sym["mean"]
            log_std_var = dist_info_sym["log_std"]
            # Compiled function: flat observations -> (means, log stds).
            self._f_dist = tensor_utils.compile_function(
                inputs=[obs_var],
                outputs=[mean_var, log_std_var],
            )
    @property
    def vectorized(self):
        return True
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic (mean, log_std) of the action distribution."""
        mean_var, std_param_var = L.get_output([self._l_mean, self._l_std_param], obs_var)
        if self.min_std_param is not None:
            std_param_var = tf.maximum(std_param_var, self.min_std_param)
        if self.std_parametrization == 'exp':
            log_std_var = std_param_var
        elif self.std_parametrization == 'softplus':
            # std = log(1 + exp(param)), hence log_std = log(log(1 + exp(param)))
            log_std_var = tf.log(tf.log(1. + tf.exp(std_param_var)))
        else:
            raise NotImplementedError
        return dict(mean=mean_var, log_std=log_std_var)
    @overrides
    def get_action(self, observation):
        flat_obs = self.observation_space.flatten(observation)
        mean, log_std = [x[0] for x in self._f_dist([flat_obs])]
        rnd = np.random.normal(size=mean.shape)
        action = rnd * np.exp(log_std) + mean
        return action, dict(mean=mean, log_std=log_std)
    def get_actions(self, observations):
        flat_obs = self.observation_space.flatten_n(observations)
        means, log_stds = self._f_dist(flat_obs)
        rnd = np.random.normal(size=means.shape)
        actions = rnd * np.exp(log_stds) + means
        return actions, dict(mean=means, log_std=log_stds)
    def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
        """
        Given observations, old actions, and distribution of old actions, return a symbolically reparameterized
        representation of the actions in terms of the policy parameters
        :param obs_var:
        :param action_var:
        :param old_dist_info_vars:
        :return:
        """
        new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
        new_mean_var, new_log_std_var = new_dist_info_vars["mean"], new_dist_info_vars["log_std"]
        old_mean_var, old_log_std_var = old_dist_info_vars["mean"], old_dist_info_vars["log_std"]
        # Recover the noise that produced the old action, then replay it
        # through the current distribution parameters.
        epsilon_var = (action_var - old_mean_var) / (tf.exp(old_log_std_var) + 1e-8)
        new_action_var = new_mean_var + epsilon_var * tf.exp(new_log_std_var)
        return new_action_var
    def log_diagnostics(self, paths):
        """Record the average policy std over the collected paths."""
        log_stds = np.vstack([path["agent_infos"]["log_std"] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))
    @property
    def distribution(self):
        return self._dist
| 8,050 | 40.076531 | 117 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/gaussian_lstm_policy.py | import numpy as np
import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import LSTMNetwork
from sandbox.rocky.tf.distributions.recurrent_diagonal_gaussian import RecurrentDiagonalGaussian
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.core.serializable import Serializable
from rllab.misc.overrides import overrides
class GaussianLSTMPolicy(StochasticPolicy, LayersPowered, Serializable):
    """
    Recurrent Gaussian policy: the action mean is produced by an LSTM
    network, and the (state-independent) log standard deviation is a learned
    parameter vector shared across time steps.
    """
    def __init__(
            self,
            name,
            env_spec,
            hidden_dim=32,
            feature_network=None,
            state_include_action=True,
            hidden_nonlinearity=tf.tanh,
            learn_std=True,
            init_std=1.0,
            output_nonlinearity=None,
            lstm_layer_cls=L.LSTMLayer,
            use_peepholes=False,
    ):
        """
        :param name: variable scope under which all parameters are created
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param feature_network: optional network mapping the flattened raw
            input to the features fed into the LSTM; if None the raw input
            is used directly
        :param state_include_action: if True, the previous action is
            concatenated to the observation as the policy input
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param learn_std: whether the log-std parameter is trainable
        :param init_std: initial standard deviation of the action distribution
        :param output_nonlinearity: nonlinearity applied to the mean output
        :param lstm_layer_cls: layer class used for the recurrent core
        :param use_peepholes: whether the LSTM uses peephole connections
        :return:
        """
        with tf.variable_scope(name):
            Serializable.quick_init(self, locals())
            super(GaussianLSTMPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim
            # Input shape is (batch, time, input_dim).
            l_input = L.InputLayer(
                shape=(None, None, input_dim),
                name="input"
            )
            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                # The feature network operates on flattened (batch*time)
                # input; reshape its output back to (batch, time, feature).
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([tf.shape(input)[0], tf.shape(input)[1], feature_dim])
                    ),
                    shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
                )
            mean_network = LSTMNetwork(
                input_shape=(feature_dim,),
                input_layer=l_feature,
                output_dim=action_dim,
                hidden_dim=hidden_dim,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                lstm_layer_cls=lstm_layer_cls,
                name="mean_network",
                use_peepholes=use_peepholes,
            )
            # State-independent log-std: one learned value per action dim.
            l_log_std = L.ParamLayer(
                mean_network.input_layer,
                num_units=action_dim,
                param=tf.constant_initializer(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )
            # Single-step variant; shares the same parameter as l_log_std.
            l_step_log_std = L.ParamLayer(
                mean_network.step_input_layer,
                num_units=action_dim,
                param=l_log_std.param,
                name="step_output_log_std",
                trainable=learn_std,
            )
            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # One-step function: (flat input, concatenated [hidden, cell]
            # state) -> (mean, log_std, new hidden state, new cell state).
            self.f_step_mean_std = tensor_utils.compile_function(
                [
                    flat_input_var,
                    mean_network.step_prev_state_layer.input_var,
                ],
                L.get_output([
                    mean_network.step_output_layer,
                    l_step_log_std,
                    mean_network.step_hidden_layer,
                    mean_network.step_cell_layer
                ], {mean_network.step_input_layer: feature_var})
            )
            self.l_log_std = l_log_std
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            # Per-environment recurrent state; allocated lazily in reset().
            self.prev_actions = None
            self.prev_hiddens = None
            self.prev_cells = None
            self.dist = RecurrentDiagonalGaussian(action_dim)
            # l_step_log_std shares its parameter with l_log_std, so listing
            # l_log_std alone covers the log-std parameter.
            out_layers = [mean_network.output_layer, l_log_std]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic (mean, log_std) over whole trajectories of observations."""
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))
        if self.state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std],
                {self.l_input: all_input_var}
            )
        else:
            flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std],
                {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
            )
        return dict(mean=means, log_std=log_stds)
    @property
    def vectorized(self):
        # Supports batched sampling across parallel environments.
        return True
    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged in `dones`."""
        if dones is None:
            dones = [True]
        dones = np.asarray(dones)
        # (Re)allocate state buffers when the number of parallel envs changes.
        if self.prev_actions is None or len(dones) != len(self.prev_actions):
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
            self.prev_cells = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.
        self.prev_hiddens[dones] = self.mean_network.hid_init_param.eval()
        self.prev_cells[dones] = self.mean_network.cell_init_param.eval()
    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        """Sample a single action; delegates to the batched get_actions."""
        actions, agent_infos = self.get_actions([observation])
        return actions[0], {k: v[0] for k, v in agent_infos.items()}
    @overrides
    def get_actions(self, observations):
        """Sample one action per observation, advancing the recurrent state."""
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert self.prev_actions is not None
            all_input = np.concatenate([
                flat_obs,
                self.prev_actions
            ], axis=-1)
        else:
            all_input = flat_obs
        # probs, hidden_vec, cell_vec = self.f_step_prob(all_input, self.prev_hiddens, self.prev_cells)
        # Hidden and cell state are passed concatenated along the last axis.
        means, log_stds, hidden_vec, cell_vec = self.f_step_mean_std(
            all_input, np.hstack([self.prev_hiddens, self.prev_cells]))
        rnd = np.random.normal(size=means.shape)
        actions = rnd * np.exp(log_stds) + means
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        self.prev_cells = cell_vec
        agent_info = dict(mean=means, log_std=log_stds)
        if self.state_include_action:
            agent_info["prev_action"] = np.copy(prev_actions)
        return actions, agent_info
    @property
    @overrides
    def recurrent(self):
        return True
    @property
    def distribution(self):
        """Recurrent diagonal Gaussian distribution over actions."""
        return self.dist
    @property
    def state_info_specs(self):
        # The previous action is part of the policy state only when it is
        # fed back as input.
        if self.state_include_action:
            return [
                ("prev_action", (self.action_dim,)),
            ]
        else:
            return []
| 8,680 | 36.743478 | 105 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/gaussian_gru_policy.py | import numpy as np
import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import GRUNetwork
from sandbox.rocky.tf.distributions.recurrent_diagonal_gaussian import RecurrentDiagonalGaussian
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.core.serializable import Serializable
from rllab.misc.overrides import overrides
from rllab.misc import logger
class GaussianGRUPolicy(StochasticPolicy, LayersPowered, Serializable):
    """
    Recurrent Gaussian policy: the action mean is produced by a GRU network,
    and the (state-independent) log standard deviation is a learned parameter
    vector shared across time steps.
    """
    def __init__(
            self,
            name,
            env_spec,
            hidden_dim=32,
            feature_network=None,
            state_include_action=True,
            hidden_nonlinearity=tf.tanh,
            gru_layer_cls=L.GRULayer,
            learn_std=True,
            init_std=1.0,
            output_nonlinearity=None,
    ):
        """
        :param name: variable scope under which all parameters are created
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param feature_network: optional network mapping the flattened raw
            input to the features fed into the GRU; if None the raw input is
            used directly
        :param state_include_action: if True, the previous action is
            concatenated to the observation as the policy input
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param gru_layer_cls: layer class used for the recurrent core
        :param learn_std: whether the log-std parameter is trainable
        :param init_std: initial standard deviation of the action distribution
        :param output_nonlinearity: nonlinearity applied to the mean output
        :return:
        """
        with tf.variable_scope(name):
            Serializable.quick_init(self, locals())
            super(GaussianGRUPolicy, self).__init__(env_spec)
            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim
            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim
            # Input shape is (batch, time, input_dim).
            l_input = L.InputLayer(
                shape=(None, None, input_dim),
                name="input"
            )
            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                # The feature network operates on flattened (batch*time)
                # input; reshape its output back to (batch, time, feature).
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([tf.shape(input)[0], tf.shape(input)[1], feature_dim])
                    ),
                    shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
                )
            mean_network = GRUNetwork(
                input_shape=(feature_dim,),
                input_layer=l_feature,
                output_dim=action_dim,
                hidden_dim=hidden_dim,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                gru_layer_cls=gru_layer_cls,
                name="mean_network"
            )
            # State-independent log-std: one learned value per action dim.
            l_log_std = L.ParamLayer(
                mean_network.input_layer,
                num_units=action_dim,
                param=tf.constant_initializer(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )
            # Single-step variant; shares the same parameter as l_log_std.
            l_step_log_std = L.ParamLayer(
                mean_network.step_input_layer,
                num_units=action_dim,
                param=l_log_std.param,
                name="step_output_log_std",
                trainable=learn_std,
            )
            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
            # One-step function: (flat input, previous hidden state) ->
            # (mean, log_std, new hidden state).
            self.f_step_mean_std = tensor_utils.compile_function(
                [
                    flat_input_var,
                    mean_network.step_prev_state_layer.input_var,
                ],
                L.get_output([
                    mean_network.step_output_layer,
                    l_step_log_std,
                    mean_network.step_hidden_layer,
                ], {mean_network.step_input_layer: feature_var})
            )
            self.l_log_std = l_log_std
            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim
            # Per-environment recurrent state; allocated lazily in reset().
            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentDiagonalGaussian(action_dim)
            out_layers = [mean_network.output_layer, l_log_std, l_step_log_std]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)
            LayersPowered.__init__(self, out_layers)
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic (mean, log_std) over whole trajectories of observations."""
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))
        if self.state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std],
                {self.l_input: all_input_var}
            )
        else:
            flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std],
                {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
            )
        return dict(mean=means, log_std=log_stds)
    @property
    def vectorized(self):
        # Supports batched sampling across parallel environments.
        return True
    def reset(self, dones=None):
        """Reset recurrent state for the environments flagged in `dones`."""
        if dones is None:
            dones = [True]
        dones = np.asarray(dones)
        # (Re)allocate state buffers when the number of parallel envs changes.
        if self.prev_actions is None or len(dones) != len(self.prev_actions):
            self.prev_actions = np.zeros((len(dones), self.action_space.flat_dim))
            self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
        self.prev_actions[dones] = 0.
        self.prev_hiddens[dones] = self.mean_network.hid_init_param.eval()
    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        """Sample a single action; delegates to the batched get_actions."""
        actions, agent_infos = self.get_actions([observation])
        return actions[0], {k: v[0] for k, v in agent_infos.items()}
    @overrides
    def get_actions(self, observations):
        """Sample one action per observation, advancing the recurrent state."""
        flat_obs = self.observation_space.flatten_n(observations)
        if self.state_include_action:
            assert self.prev_actions is not None
            all_input = np.concatenate([
                flat_obs,
                self.prev_actions
            ], axis=-1)
        else:
            all_input = flat_obs
        means, log_stds, hidden_vec = self.f_step_mean_std(all_input, self.prev_hiddens)
        rnd = np.random.normal(size=means.shape)
        actions = rnd * np.exp(log_stds) + means
        prev_actions = self.prev_actions
        self.prev_actions = self.action_space.flatten_n(actions)
        self.prev_hiddens = hidden_vec
        agent_info = dict(mean=means, log_std=log_stds)
        if self.state_include_action:
            agent_info["prev_action"] = np.copy(prev_actions)
        return actions, agent_info
    @property
    @overrides
    def recurrent(self):
        return True
    @property
    def distribution(self):
        """Recurrent diagonal Gaussian distribution over actions."""
        return self.dist
    @property
    def state_info_specs(self):
        # The previous action is part of the policy state only when it is
        # fed back as input.
        if self.state_include_action:
            return [
                ("prev_action", (self.action_dim,)),
            ]
        else:
            return []
    def log_diagnostics(self, paths):
        """Record the average policy standard deviation over all paths."""
        log_stds = np.vstack([path["agent_infos"]["log_std"] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))
| 8,416 | 36.243363 | 105 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/policies/categorical_conv_policy.py | from sandbox.rocky.tf.core.layers_powered import LayersPowered
import sandbox.rocky.tf.core.layers as L
from sandbox.rocky.tf.core.network import ConvNetwork
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.policies.base import StochasticPolicy
from rllab.misc import ext
from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc.overrides import overrides
from sandbox.rocky.tf.spaces.discrete import Discrete
import tensorflow as tf
class CategoricalConvPolicy(StochasticPolicy, LayersPowered, Serializable):
    """
    Stochastic policy for discrete action spaces: a convolutional network
    maps observations to a softmax over actions.
    """
    def __init__(
            self,
            name,
            env_spec,
            conv_filters, conv_filter_sizes, conv_strides, conv_pads,
            hidden_sizes=[],
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.softmax,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        # NOTE(review): `name` is accepted but construction is not wrapped in
        # tf.variable_scope(name), unlike the other policies in this package —
        # confirm whether that is intended.
        # NOTE(review): `hidden_sizes=[]` is a mutable default argument; it is
        # only read here, but verify ConvNetwork does not mutate it.
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        self._env_spec = env_spec
        # import pdb; pdb.set_trace()
        if prob_network is None:
            prob_network = ConvNetwork(
                input_shape=env_spec.observation_space.shape,
                output_dim=env_spec.action_space.n,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                name="prob_network",
            )
        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer
        # Compiled function: flattened observations -> action probabilities.
        self._f_prob = tensor_utils.compile_function(
            [prob_network.input_layer.input_var],
            L.get_output(prob_network.output_layer)
        )
        self._dist = Categorical(env_spec.action_space.n)
        super(CategoricalConvPolicy, self).__init__(env_spec)
        LayersPowered.__init__(self, [prob_network.output_layer])
    @property
    def vectorized(self):
        # Supports batched sampling across parallel environments.
        return True
    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic action probabilities for a batch of observations."""
        return dict(prob=L.get_output(self._l_prob, {self._l_obs: tf.cast(obs_var, tf.float32)}))
    @overrides
    def dist_info(self, obs, state_infos=None):
        """Numeric action probabilities for a batch of observations."""
        return dict(prob=self._f_prob(obs))
    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        """Sample a single action from the softmax output."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        action = self.action_space.weighted_sample(prob)
        return action, dict(prob=prob)
    def get_actions(self, observations):
        """Sample one action per observation from the softmax outputs."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return actions, dict(prob=probs)
    @property
    def distribution(self):
        """Categorical distribution over the discrete action space."""
        return self._dist
| 3,662 | 36.762887 | 97 | py |
rllab | rllab-master/sandbox/rocky/tf/policies/deterministic_mlp_policy.py | from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc.overrides import overrides
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.policies.base import Policy
from sandbox.rocky.tf.misc import tensor_utils
import sandbox.rocky.tf.core.layers as L
from sandbox.rocky.tf.core.layers import batch_norm
from sandbox.rocky.tf.spaces.discrete import Discrete
import tensorflow as tf
class DeterministicMLPPolicy(Policy, LayersPowered, Serializable):
    """
    Deterministic policy (e.g. for DDPG): an MLP maps observations directly
    to actions, with no sampling.
    """
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.tanh,
            prob_network=None,
            bn=False):
        """
        :param name: variable scope under which all parameters are created
        :param env_spec: A spec for the env.
        :param hidden_sizes: sizes of the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity applied to the action output
        :param prob_network: manually specified network; other network params
            are ignored when given
        :param bn: batch-normalization flag (currently unused, see note below)
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = MLP(
                    input_shape=(env_spec.observation_space.flat_dim,),
                    output_dim=env_spec.action_space.flat_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    # batch_normalization=True,
                    name="prob_network",
                )
            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            # Compiled function: flattened observations -> actions.
            self._f_prob = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer, deterministic=True)
            )
            self.prob_network = prob_network
        # Note the deterministic=True argument. It makes sure that when getting
        # actions from single observations, we do not update params in the
        # batch normalization layers.
        # TODO: this doesn't currently work properly in the tf version so we leave out batch_norm
        super(DeterministicMLPPolicy, self).__init__(env_spec)
        LayersPowered.__init__(self, [prob_network.output_layer])
    @property
    def vectorized(self):
        # Supports batched action computation across parallel environments.
        return True
    @overrides
    def get_action(self, observation):
        """Compute the deterministic action for a single observation."""
        flat_obs = self.observation_space.flatten(observation)
        action = self._f_prob([flat_obs])[0]
        return action, dict()
    @overrides
    def get_actions(self, observations):
        """Compute deterministic actions for a batch of observations."""
        flat_obs = self.observation_space.flatten_n(observations)
        actions = self._f_prob(flat_obs)
        return actions, dict()
    def get_action_sym(self, obs_var):
        """Symbolic actions for symbolic observations `obs_var`."""
        return L.get_output(self.prob_network.output_layer, obs_var)
| 2,794 | 36.266667 | 97 | py |
rllab | rllab-master/sandbox/rocky/tf/algos/npo.py |
from rllab.misc import ext
from rllab.misc.overrides import overrides
import rllab.misc.logger as logger
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from sandbox.rocky.tf.algos.batch_polopt import BatchPolopt
from sandbox.rocky.tf.misc import tensor_utils
import tensorflow as tf
class NPO(BatchPolopt):
    """
    Natural Policy Optimization.

    Maximizes the surrogate objective E[likelihood_ratio * advantage]
    subject to a KL-divergence trust-region constraint of size `step_size`.
    """
    def __init__(
            self,
            optimizer=None,
            optimizer_args=None,
            step_size=0.01,
            **kwargs):
        """
        :param optimizer: constrained optimizer; defaults to
            PenaltyLbfgsOptimizer built from `optimizer_args`
        :param optimizer_args: kwargs for the default optimizer (ignored when
            `optimizer` is given)
        :param step_size: bound on the mean KL divergence per update
        """
        if optimizer is None:
            if optimizer_args is None:
                optimizer_args = dict()
            optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.step_size = step_size
        super(NPO, self).__init__(**kwargs)
    @overrides
    def init_opt(self):
        """Build the surrogate loss, KL constraint, and optimizer inputs."""
        is_recurrent = int(self.policy.recurrent)
        # Recurrent policies get an extra leading time dimension.
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = tensor_utils.new_tensor(
            'advantage',
            ndim=1 + is_recurrent,
            dtype=tf.float32,
        )
        dist = self.policy.distribution
        # Placeholders for the pre-update distribution parameters.
        old_dist_info_vars = {
            k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name='old_%s' % k)
            for k, shape in dist.dist_info_specs
        }
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        state_info_vars = {
            k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name=k)
            for k, shape in self.policy.state_info_specs
        }
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        if is_recurrent:
            # Mask marking the valid (non-padding) time steps of each path.
            valid_var = tf.placeholder(tf.float32, shape=[None, None], name="valid")
        else:
            valid_var = None
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars, dist_info_vars)
        if is_recurrent:
            mean_kl = tf.reduce_sum(kl * valid_var) / tf.reduce_sum(valid_var)
            surr_loss = - tf.reduce_sum(lr * advantage_var * valid_var) / tf.reduce_sum(valid_var)
        else:
            mean_kl = tf.reduce_mean(kl)
            surr_loss = - tf.reduce_mean(lr * advantage_var)
        input_list = [
            obs_var,
            action_var,
            advantage_var,
        ] + state_info_vars_list + old_dist_info_vars_list
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(
            loss=surr_loss,
            target=self.policy,
            leq_constraint=(mean_kl, self.step_size),
            inputs=input_list,
            constraint_name="mean_kl"
        )
        return dict()
    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one constrained optimization step on the collected samples."""
        all_input_values = tuple(ext.extract(
            samples_data,
            "observations", "actions", "advantages"
        ))
        agent_infos = samples_data["agent_infos"]
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        all_input_values += tuple(state_info_list) + tuple(dist_info_list)
        if self.policy.recurrent:
            all_input_values += (samples_data["valids"],)
        logger.log("Computing loss before")
        loss_before = self.optimizer.loss(all_input_values)
        logger.log("Computing KL before")
        mean_kl_before = self.optimizer.constraint_val(all_input_values)
        logger.log("Optimizing")
        self.optimizer.optimize(all_input_values)
        logger.log("Computing KL after")
        mean_kl = self.optimizer.constraint_val(all_input_values)
        logger.log("Computing loss after")
        loss_after = self.optimizer.loss(all_input_values)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        logger.record_tabular('MeanKLBefore', mean_kl_before)
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('dLoss', loss_before - loss_after)
        return dict()
    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Data to persist in the snapshot for iteration `itr`."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
| 4,814 | 35.755725 | 109 | py |
rllab | rllab-master/sandbox/rocky/tf/algos/vpg.py |
from rllab.misc import logger
from rllab.misc import ext
from rllab.misc.overrides import overrides
from sandbox.rocky.tf.algos.batch_polopt import BatchPolopt
from sandbox.rocky.tf.optimizers.first_order_optimizer import FirstOrderOptimizer
from sandbox.rocky.tf.misc import tensor_utils
from rllab.core.serializable import Serializable
import tensorflow as tf
class VPG(BatchPolopt, Serializable):
    """
    Vanilla Policy Gradient.

    Minimizes the surrogate objective -E[log pi(a|s) * advantage] with a
    first-order optimizer; KL statistics are logged for diagnostics only.
    """
    def __init__(
            self,
            env,
            policy,
            baseline,
            optimizer=None,
            optimizer_args=None,
            **kwargs):
        """
        :param optimizer: optimizer to use; defaults to a FirstOrderOptimizer
            doing a single full-batch epoch per iteration
        :param optimizer_args: overrides for the default optimizer's kwargs
        """
        Serializable.quick_init(self, locals())
        if optimizer is None:
            default_args = dict(
                batch_size=None,
                max_epochs=1,
            )
            if optimizer_args is None:
                optimizer_args = default_args
            else:
                optimizer_args = dict(default_args, **optimizer_args)
            optimizer = FirstOrderOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.opt_info = None
        super(VPG, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)
    @overrides
    def init_opt(self):
        """Build the surrogate loss, diagnostics, and optimizer inputs."""
        is_recurrent = int(self.policy.recurrent)
        # Recurrent policies get an extra leading time dimension.
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = tensor_utils.new_tensor(
            name='advantage',
            ndim=1 + is_recurrent,
            dtype=tf.float32,
        )
        dist = self.policy.distribution
        # Placeholders for the pre-update distribution parameters (used only
        # for the KL diagnostics below).
        old_dist_info_vars = {
            k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name='old_%s' % k)
            for k, shape in dist.dist_info_specs
        }
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        state_info_vars = {
            k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name=k)
            for k, shape in self.policy.state_info_specs
        }
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        if is_recurrent:
            # Mask marking the valid (non-padding) time steps of each path.
            valid_var = tf.placeholder(tf.float32, shape=[None, None], name="valid")
        else:
            valid_var = None
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        # formulate as a minimization problem
        # The gradient of the surrogate objective is the policy gradient
        if is_recurrent:
            surr_obj = - tf.reduce_sum(logli * advantage_var * valid_var) / tf.reduce_sum(valid_var)
            mean_kl = tf.reduce_sum(kl * valid_var) / tf.reduce_sum(valid_var)
            max_kl = tf.reduce_max(kl * valid_var)
        else:
            surr_obj = - tf.reduce_mean(logli * advantage_var)
            mean_kl = tf.reduce_mean(kl)
            max_kl = tf.reduce_max(kl)
        input_list = [obs_var, action_var, advantage_var] + state_info_vars_list
        if is_recurrent:
            input_list.append(valid_var)
        self.optimizer.update_opt(loss=surr_obj, target=self.policy, inputs=input_list)
        f_kl = tensor_utils.compile_function(
            inputs=input_list + old_dist_info_vars_list,
            outputs=[mean_kl, max_kl],
        )
        self.opt_info = dict(
            f_kl=f_kl,
        )
    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one gradient-based optimization pass on the collected samples."""
        logger.log("optimizing policy")
        inputs = ext.extract(
            samples_data,
            "observations", "actions", "advantages"
        )
        agent_infos = samples_data["agent_infos"]
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        inputs += tuple(state_info_list)
        if self.policy.recurrent:
            inputs += (samples_data["valids"],)
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        loss_before = self.optimizer.loss(inputs)
        self.optimizer.optimize(inputs)
        loss_after = self.optimizer.loss(inputs)
        logger.record_tabular("LossBefore", loss_before)
        logger.record_tabular("LossAfter", loss_after)
        # KL between the pre- and post-update policies, for diagnostics.
        mean_kl, max_kl = self.opt_info['f_kl'](*(list(inputs) + dist_info_list))
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('MaxKL', max_kl)
    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Data to persist in the snapshot for iteration `itr`."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
| 4,899 | 34.766423 | 109 | py |
rllab | rllab-master/sandbox/rocky/tf/algos/trpo.py |
from sandbox.rocky.tf.algos.npo import NPO
from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer
class TRPO(NPO):
    """
    Trust Region Policy Optimization.

    A thin wrapper around NPO that defaults the optimizer to a
    conjugate-gradient (natural gradient) optimizer.
    """
    def __init__(
            self,
            optimizer=None,
            optimizer_args=None,
            **kwargs):
        if optimizer is None:
            optimizer = ConjugateGradientOptimizer(**(optimizer_args or dict()))
        super(TRPO, self).__init__(optimizer=optimizer, **kwargs)
| 578 | 25.318182 | 95 | py |
rllab | rllab-master/sandbox/rocky/tf/algos/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/algos/npg.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/algos/batch_polopt.py | import time
from rllab.algos.base import RLAlgorithm
import rllab.misc.logger as logger
from sandbox.rocky.tf.policies.base import Policy
import tensorflow as tf
from sandbox.rocky.tf.samplers.batch_sampler import BatchSampler
from sandbox.rocky.tf.samplers.vectorized_sampler import VectorizedSampler
from rllab.sampler.utils import rollout
class BatchPolopt(RLAlgorithm):
    """
    Base class for batch sampling-based policy optimization methods.
    This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.

    Subclasses implement init_opt / optimize_policy / get_itr_snapshot; this
    class owns the sample-collect / optimize / snapshot training loop.
    """
    def __init__(
            self,
            env,
            policy,
            baseline,
            scope=None,
            n_itr=500,
            start_itr=0,
            batch_size=5000,
            max_path_length=500,
            discount=0.99,
            gae_lambda=1,
            plot=False,
            pause_for_plot=False,
            center_adv=True,
            positive_adv=False,
            store_paths=False,
            whole_paths=True,
            fixed_horizon=False,
            sampler_cls=None,
            sampler_args=None,
            force_batch_sampler=False,
            **kwargs
    ):
        """
        :param env: Environment
        :param policy: Policy
        :type policy: Policy
        :param baseline: Baseline
        :param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms
        simultaneously, each using different environments and policies
        :param n_itr: Number of iterations.
        :param start_itr: Starting iteration.
        :param batch_size: Number of samples per iteration.
        :param max_path_length: Maximum length of a single rollout.
        :param discount: Discount.
        :param gae_lambda: Lambda used for generalized advantage estimation.
        :param plot: Plot evaluation run after each iteration.
        :param pause_for_plot: Whether to pause before continuing when plotting.
        :param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.
        :param positive_adv: Whether to shift the advantages so that they are always positive. When used in
        conjunction with center_adv the advantages will be standardized before shifting.
        :param store_paths: Whether to save all paths data to the snapshot.
        :return:
        """
        self.env = env
        self.policy = policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.start_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        self.fixed_horizon = fixed_horizon
        # Vectorized policies can be sampled in parallel environments unless
        # the plain batch sampler is explicitly forced.
        if sampler_cls is None:
            if self.policy.vectorized and not force_batch_sampler:
                sampler_cls = VectorizedSampler
            else:
                sampler_cls = BatchSampler
        if sampler_args is None:
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)
        self.init_opt()
    def start_worker(self):
        """Start the sampler's worker processes/threads."""
        self.sampler.start_worker()
    def shutdown_worker(self):
        """Shut down the sampler's worker processes/threads."""
        self.sampler.shutdown_worker()
    def obtain_samples(self, itr):
        """Collect a batch of rollouts for iteration `itr`."""
        return self.sampler.obtain_samples(itr)
    def process_samples(self, itr, paths):
        """Post-process raw paths (advantages, baselines, etc.)."""
        return self.sampler.process_samples(itr, paths)
    def train(self, sess=None):
        """
        Run the full training loop: repeatedly sample, optimize, and save a
        per-iteration snapshot. Creates (and later closes) a TF session only
        if one is not supplied.
        """
        created_session = True if (sess is None) else False
        if sess is None:
            sess = tf.Session()
            sess.__enter__()
        sess.run(tf.global_variables_initializer())
        self.start_worker()
        start_time = time.time()
        for itr in range(self.start_itr, self.n_itr):
            itr_start_time = time.time()
            with logger.prefix('itr #%d | ' % itr):
                logger.log("Obtaining samples...")
                paths = self.obtain_samples(itr)
                logger.log("Processing samples...")
                samples_data = self.process_samples(itr, paths)
                logger.log("Logging diagnostics...")
                self.log_diagnostics(paths)
                logger.log("Optimizing policy...")
                self.optimize_policy(itr, samples_data)
                logger.log("Saving snapshot...")
                params = self.get_itr_snapshot(itr, samples_data)  # , **kwargs)
                if self.store_paths:
                    params["paths"] = samples_data["paths"]
                logger.save_itr_params(itr, params)
                logger.log("Saved")
                logger.record_tabular('Time', time.time() - start_time)
                logger.record_tabular('ItrTime', time.time() - itr_start_time)
                logger.dump_tabular(with_prefix=False)
                if self.plot:
                    rollout(self.env, self.policy, animated=True, max_path_length=self.max_path_length)
                    if self.pause_for_plot:
                        input("Plotting evaluation run: Press Enter to "
                              "continue...")
        self.shutdown_worker()
        if created_session:
            sess.close()
    def log_diagnostics(self, paths):
        """Let env, policy, and baseline record their own diagnostics."""
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)
    def init_opt(self):
        """
        Initialize the optimization procedure. If using tensorflow, this may
        include declaring all the variables and compiling functions
        """
        raise NotImplementedError
    def get_itr_snapshot(self, itr, samples_data):
        """
        Returns all the data that should be saved in the snapshot for this
        iteration.
        """
        raise NotImplementedError
    def optimize_policy(self, itr, samples_data):
        """Perform one policy-optimization step; implemented by subclasses."""
        raise NotImplementedError
| 6,100 | 36.89441 | 111 | py |
rllab | rllab-master/sandbox/rocky/tf/spaces/box.py | from rllab.spaces.box import Box as TheanoBox
import tensorflow as tf
class Box(TheanoBox):
    """TensorFlow flavour of the continuous Box space."""
    def new_tensor_variable(self, name, extra_dims, flatten=True):
        """
        Create a float32 placeholder for a batch of elements of this space,
        with `extra_dims` leading batch axes. The trailing shape is the flat
        dimension when `flatten` is True, otherwise the space's full shape.
        """
        if flatten:
            trailing = [self.flat_dim]
        else:
            trailing = list(self.shape)
        return tf.placeholder(tf.float32, shape=[None] * extra_dims + trailing, name=name)
    @property
    def dtype(self):
        """Tensor dtype used for elements of this space."""
        return tf.float32
| 444 | 30.785714 | 101 | py |
rllab | rllab-master/sandbox/rocky/tf/spaces/discrete.py | from rllab.spaces.base import Space
import numpy as np
from rllab.misc import special
from rllab.misc import ext
import tensorflow as tf
class Discrete(Space):
    """
    The discrete space {0, 1, ..., n-1}.

    Elements are plain integers; the flattened representation is a one-hot
    vector of length n.
    """
    def __init__(self, n):
        self._n = n
    @property
    def n(self):
        """Number of distinct elements in the space."""
        return self._n
    def sample(self):
        """Draw a uniformly random element of the space."""
        return np.random.randint(self.n)
    def sample_n(self, n):
        """Draw `n` uniformly random elements of the space."""
        return np.random.randint(low=0, high=self.n, size=n)
    def contains(self, x):
        """Return True iff `x` is a scalar integer in [0, n)."""
        x = np.asarray(x)
        return x.shape == () and x.dtype.kind == 'i' and x >= 0 and x < self.n
    def __repr__(self):
        return "Discrete(%d)" % self.n
    def flatten(self, x):
        """Convert an element to its one-hot representation."""
        return special.to_onehot(x, self.n)
    def unflatten(self, x):
        """Recover an element from its one-hot representation."""
        return special.from_onehot(x)
    def flatten_n(self, x):
        """Convert a batch of elements to a matrix of one-hot rows."""
        return special.to_onehot_n(x, self.n)
    def unflatten_n(self, x):
        """Recover a batch of elements from a matrix of one-hot rows."""
        return special.from_onehot_n(x)
    @property
    def default_value(self):
        return 0
    @property
    def flat_dim(self):
        """Length of the one-hot representation."""
        return self.n
    def weighted_sample(self, weights):
        """Sample an element according to the given probabilities."""
        return special.weighted_sample(weights, range(self.n))
    def new_tensor_variable(self, name, extra_dims):
        """Create a uint8 placeholder for a batch of one-hot encoded elements."""
        # needed for safe conversion to float32
        return tf.placeholder(dtype=tf.uint8, shape=[None] * extra_dims + [self.flat_dim], name=name)
    @property
    def dtype(self):
        return tf.uint8
    # Fix: __eq__ was defined twice; the first definition was dead code
    # (shadowed by this one) and would have raised AttributeError when
    # compared against a non-Discrete object. Only the safe version is kept,
    # paired with __hash__ so equal spaces hash equally.
    def __eq__(self, other):
        if not isinstance(other, Discrete):
            return False
        return self.n == other.n
    def __hash__(self):
        return hash(self.n)
| 1,672 | 21.306667 | 101 | py |
rllab | rllab-master/sandbox/rocky/tf/spaces/__init__.py | from .product import Product
from .discrete import Discrete
from .box import Box
__all__ = ["Product", "Discrete", "Box"]
| 123 | 19.666667 | 40 | py |
rllab | rllab-master/sandbox/rocky/tf/spaces/product.py | from rllab.spaces.base import Space
import tensorflow as tf
import numpy as np
class Product(Space):
    """Cartesian product of component spaces.

    The flat representation is the concatenation of the components' flat
    representations; samples and unflattened values are tuples with one
    entry per component.
    """

    def __init__(self, *components):
        # Accept both Product(a, b, c) and Product([a, b, c]).
        if isinstance(components[0], (list, tuple)):
            assert len(components) == 1
            components = components[0]
        self._components = tuple(components)
        dtypes = [c.dtype for c in components]
        # TF dtypes expose as_numpy_dtype; normalize to numpy dtypes first.
        if len(dtypes) > 0 and hasattr(dtypes[0], "as_numpy_dtype"):
            dtypes = [d.as_numpy_dtype for d in dtypes]
        # NOTE(review): np.core.numerictypes.find_common_type is deprecated in
        # NumPy >= 1.25 and removed in 2.0 — revisit if upgrading NumPy.
        self._common_dtype = np.core.numerictypes.find_common_type([], dtypes)

    def sample(self):
        return tuple(x.sample() for x in self._components)

    @property
    def components(self):
        return self._components

    def contains(self, x):
        # BUG FIX: zip() truncates to the shorter argument, so a tuple with
        # fewer entries than there are components could previously pass this
        # check. Require an exact length match.
        return (isinstance(x, tuple)
                and len(x) == len(self._components)
                and all(c.contains(xi) for c, xi in zip(self._components, x)))

    def new_tensor_variable(self, name, extra_dims):
        """Placeholder over the flat representation, with `extra_dims`
        unknown leading dimensions."""
        return tf.placeholder(
            dtype=self._common_dtype,
            shape=[None] * extra_dims + [self.flat_dim],
            name=name,
        )

    @property
    def dtype(self):
        return self._common_dtype

    @property
    def flat_dim(self):
        return int(np.sum([c.flat_dim for c in self._components]))

    def flatten(self, x):
        return np.concatenate([c.flatten(xi) for c, xi in zip(self._components, x)])

    def flatten_n(self, xs):
        # Regroup from per-sample tuples to per-component lists, flatten each
        # component batch, then concatenate along the last axis.
        xs_regrouped = [[x[i] for x in xs] for i in range(len(xs[0]))]
        flat_regrouped = [c.flatten_n(xi) for c, xi in zip(self.components, xs_regrouped)]
        return np.concatenate(flat_regrouped, axis=-1)

    def unflatten(self, x):
        dims = [c.flat_dim for c in self._components]
        flat_xs = np.split(x, np.cumsum(dims)[:-1])
        return tuple(c.unflatten(xi) for c, xi in zip(self._components, flat_xs))

    def unflatten_n(self, xs):
        dims = [c.flat_dim for c in self._components]
        flat_xs = np.split(xs, np.cumsum(dims)[:-1], axis=-1)
        unflat_xs = [c.unflatten_n(xi) for c, xi in zip(self.components, flat_xs)]
        # Transpose back from per-component batches to per-sample tuples.
        unflat_xs_grouped = list(zip(*unflat_xs))
        return unflat_xs_grouped

    def __eq__(self, other):
        if not isinstance(other, Product):
            return False
        return tuple(self.components) == tuple(other.components)

    def __hash__(self):
        return hash(tuple(self.components))
| 2,360 | 33.217391 | 97 | py |
rllab | rllab-master/sandbox/rocky/tf/misc/tensor_utils.py | import tensorflow as tf
import numpy as np
def compile_function(inputs, outputs, log_name=None):
    """Return a callable that evaluates `outputs` in the default TF session,
    feeding its positional arguments into `inputs`.

    `log_name` is unused; kept for interface compatibility.
    """
    def run(*input_vals):
        feed = dict(zip(inputs, input_vals))
        return tf.get_default_session().run(outputs, feed_dict=feed)
    return run
def flatten_tensor_variables(ts):
    """Reshape each tensor in `ts` to 1-D and concatenate into one flat tensor."""
    flat_parts = [tf.reshape(t, [-1]) for t in ts]
    return tf.concat(axis=0, values=flat_parts)
def unflatten_tensor_variables(flatarr, shapes, symb_arrs):
    """Slice the flat tensor `flatarr` back into tensors with the given
    `shapes`. `symb_arrs` only paces the iteration (zipped with `shapes`);
    its elements are not otherwise used."""
    out = []
    offset = 0
    for shape, _symb in zip(shapes, symb_arrs):
        size = np.prod(list(shape))
        out.append(tf.reshape(flatarr[offset:offset + size], shape))
        offset += size
    return out
def new_tensor(name, ndim, dtype):
    """Create a placeholder with `ndim` dimensions of unknown size."""
    shape = [None] * ndim
    return tf.placeholder(dtype=dtype, shape=shape, name=name)
def new_tensor_like(name, arr_like):
    """Create a placeholder matching the rank and base dtype of `arr_like`."""
    rank = arr_like.get_shape().ndims
    return new_tensor(name, rank, arr_like.dtype.base_dtype)
def concat_tensor_list(tensor_list):
    """Concatenate a list of arrays along their leading axis."""
    arrays = list(tensor_list)
    return np.concatenate(arrays, axis=0)
def concat_tensor_dict_list(tensor_dict_list):
    """Concatenate a list of (possibly nested) dicts of arrays, key by key.

    Nested dicts are handled recursively; leaf arrays are concatenated along
    axis 0. The key set is taken from the first dict in the list.
    """
    result = dict()
    for key in list(tensor_dict_list[0].keys()):
        vals = [d[key] for d in tensor_dict_list]
        if isinstance(vals[0], dict):
            result[key] = concat_tensor_dict_list(vals)
        else:
            result[key] = np.concatenate(vals, axis=0)
    return result
def stack_tensor_list(tensor_list):
    """Stack a list of tensors along a new leading axis via plain array
    construction."""
    return np.array(tensor_list)
def stack_tensor_dict_list(tensor_dict_list):
    """
    Stack a list of dictionaries of {tensors or dictionary of tensors}.
    :param tensor_dict_list: a list of dictionaries of {tensors or dictionary of tensors}.
    :return: a dictionary of {stacked tensors or dictionary of stacked tensors}
    """
    stacked = dict()
    for key in list(tensor_dict_list[0].keys()):
        entries = [d[key] for d in tensor_dict_list]
        if isinstance(entries[0], dict):
            stacked[key] = stack_tensor_dict_list(entries)
        else:
            # Leaf values are stacked along a new leading axis.
            stacked[key] = np.array(entries)
    return stacked
def split_tensor_dict_list(tensor_dict):
    """Invert a dict of sequences into a list of dicts, one per index.

    Nested dicts are split recursively. Returns None for an empty dict
    (matching the original behavior).
    """
    result = None
    for key, vals in tensor_dict.items():
        if isinstance(vals, dict):
            vals = split_tensor_dict_list(vals)
        if result is None:
            # First key seen: one output dict per element.
            result = [{key: v} for v in vals]
        else:
            for v, entry in zip(vals, result):
                entry[key] = v
    return result
def to_onehot_sym(inds, dim):
    """Symbolic one-hot encoding: maps integer indices `inds` to depth-`dim`
    vectors with 1 at the index and 0 elsewhere (integer on/off values)."""
    return tf.one_hot(inds, depth=dim, on_value=1, off_value=0)
def pad_tensor(x, max_len):
    """Zero-pad `x` along axis 0 so its length becomes `max_len`.

    The filler matches the shape and dtype of a single element of `x`.
    """
    n_pad = max_len - len(x)
    # One zero element, tiled n_pad times along a new leading axis.
    filler = np.tile(np.zeros_like(x[0]), (n_pad,) + (1,) * np.ndim(x[0]))
    return np.concatenate([x, filler])
def pad_tensor_n(xs, max_len):
    """Stack the tensors in `xs` into one array of shape
    (len(xs), max_len, ...) — shorter entries are zero-padded along axis 0.

    The trailing shape and dtype are taken from xs[0].
    """
    padded = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
    for row, x in zip(padded, xs):
        # `row` is a view into `padded`, so this fills in place.
        row[:len(x)] = x
    return padded
def pad_tensor_dict(tensor_dict, max_len):
    """Zero-pad every leaf tensor in a (possibly nested) dict to `max_len`
    along axis 0, recursing into nested dicts."""
    padded = dict()
    for key, val in tensor_dict.items():
        if isinstance(val, dict):
            padded[key] = pad_tensor_dict(val, max_len)
        else:
            padded[key] = pad_tensor(val, max_len)
    return padded
| 3,406 | 27.157025 | 90 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.