repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/nystrom.py | #This implementation is based on the article:
#
# @article{Quinonero-Candela:2005:UVS:1046920.1194909,
# author = {Qui\~{n}onero-Candela, Joaquin and Rasmussen, Carl Edward},
# title = {A Unifying View of Sparse Approximate Gaussian Process Regression},
# journal = {J. Mach. Learn. Res.},
# issue_date = {12/1/2005},
# volume = {6},
# month = dec,
# year = {2005},
# issn = {1532-4435},
# pages = {1939--1959},
# numpages = {21},
# url = {http://dl.acm.org/citation.cfm?id=1046920.1194909},
# acmid = {1194909},
# publisher = {JMLR.org},
# }
import numpy as np
from preconditioner import Preconditioner
import time
"""
Nystrom Preconditioner
"""
class Nystrom(Preconditioner):
    """
    Construct preconditioning matrix
    X - Training data
    kern - Class of kernel function
    Xm - Inducing points
    addNoise - Flag indicating whether to add likelihood variance to kernel matrix
    """
    def __init__(self, X, kern, Xm, addNoise=True):
        super(Nystrom, self).__init__("Nystrom")
        start = time.time()
        self.kern = kern
        self.X = X
        N = np.shape(X)[0]
        M = np.shape(Xm)[0]
        self.M = M
        self.N = N
        Kxm = kern.K(X, Xm)  # cross-covariance between data and inducing points (N x M)
        Km = kern.K(Xm, Xm)  # covariance among inducing points (M x M)
        self.Kxm = Kxm
        self.Km = Km + 1e-6*np.identity(M) # jitter
        self.KmInv = np.linalg.inv(self.Km)
        # Nystrom approximation: P = Kxm Km^-1 Kxm^T, optionally plus the
        # likelihood noise on the diagonal.
        if addNoise:
            self.precon = np.dot(np.dot(Kxm,self.KmInv),Kxm.T) + self.kern.noise*np.identity(N)
        else:
            self.precon = np.dot(np.dot(Kxm,self.KmInv),Kxm.T)
        self.duration = time.time() - start
    """
    Compute inversion of the preconditioner.
    """
    def get_inversion(self):
        N = np.shape(self.X)[0]
        M = np.shape(self.Km)[0]
        noise = self.kern.noise
        inv_noise = float(1) / noise
        noise_matrix = noise*np.identity(M)
        # Build a symmetric square-root factor of Km^-1 from its
        # eigendecomposition, clamping small eigenvalues at the kernel
        # jitter for numerical stability.
        eigs, eigv = np.linalg.eig(self.KmInv)
        for i in xrange(len(eigv)):
            if (eigs[i] < self.kern.jitter):
                eigs[i] = self.kern.jitter
            eigs[i] = np.sqrt(eigs[i])
        eigsD = np.diag(eigs)
        left = np.dot(self.Kxm, np.dot(eigv, eigsD))
        right = np.dot(eigsD, np.dot(eigv.T, self.Kxm.T))
        # Woodbury applied to noise*(I + left*right/noise): the identity is
        # passed as A^-1 and noise*I as the Cinv argument, with the overall
        # 1/noise scaling applied outside.
        return inv_noise*self.woodbury_inversion(np.identity(N), left, noise_matrix, right)
    """
    Implementation of Woodbury's matrix inversion lemma.
    """
    def woodbury_inversion(self, Ainv, U, Cinv, V):
        # (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1
        left_outer = np.dot(Ainv, U)
        right_outer = np.dot(V, Ainv)
        inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
        return Ainv - np.dot(left_outer, np.dot(inner, right_outer))
    """
    Direct computation of (K^-1)b exploiting the matrix inversion lemma.
    """
    def inv_vec_prod(self, b):
        noise = self.kern.noise
        inv_noise = float(1) / noise
        inv_noise_matrix = inv_noise*np.identity(np.shape(self.X)[0])
        # P = noise*I + Kxm Km^-1 Kxm^T, so A^-1 = I/noise and the Cinv
        # argument (i.e. C^-1 with C = Km^-1) is Km itself.
        Ainv = inv_noise_matrix
        U = self.Kxm
        Cinv = self.Km
        V = self.Kxm.T
        right_outer = np.dot(V, np.dot(Ainv, b))
        inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
        left_outer = np.dot(Ainv, np.dot(U, np.dot(inner, right_outer)))
        return np.dot(Ainv, b) - left_outer
    """
    Inversion of preconditioner for Laplace Approximation.
    """
    def get_laplace_inversion(self, W, Wsqrt):
        # Same clamped square-root construction as in get_inversion.
        eigs, eigv = np.linalg.eig(self.KmInv)
        for i in xrange(len(eigv)):
            if (eigs[i] < self.kern.jitter):
                eigs[i] = self.kern.jitter
            eigs[i] = np.sqrt(eigs[i])
        eigsD = np.diag(eigs)
        left = np.dot(self.Kxm, np.dot(eigv, eigsD))
        right = np.dot(eigsD, np.dot(eigv.T, self.Kxm.T))
        return self.laplace_woodbury_inversion(left, right, W.flatten(), Wsqrt.flatten())
    def laplace_woodbury_inversion(self, U, V, W, Wsqrt):
        # Woodbury inverse of (I_N + diag(Wsqrt) U V diag(Wsqrt)) using the
        # M x M inner system.
        left_outer = np.dot(np.diag(Wsqrt), U)
        right_outer = np.dot(V, np.diag(Wsqrt))
        inner = np.linalg.inv(np.identity(self.M) + np.dot(V, np.dot(np.diag(W), U)))
        return np.identity(self.N) - np.dot(left_outer, np.dot(inner, right_outer))
| 3,672 | 26.616541 | 86 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/pitc.py | #This implementation is based on the article:
#
# @article{Quinonero-Candela:2005:UVS:1046920.1194909,
# author = {Qui\~{n}onero-Candela, Joaquin and Rasmussen, Carl Edward},
# title = {A Unifying View of Sparse Approximate Gaussian Process Regression},
# journal = {J. Mach. Learn. Res.},
# issue_date = {12/1/2005},
# volume = {6},
# month = dec,
# year = {2005},
# issn = {1532-4435},
# pages = {1939--1959},
# numpages = {21},
# url = {http://dl.acm.org/citation.cfm?id=1046920.1194909},
# acmid = {1194909},
# publisher = {JMLR.org},
# }
import numpy as np
import time
from preconditioner import Preconditioner
from nystrom import Nystrom
from scipy.linalg import block_diag
"""
Partially-independent Training Conditional (PITC) Preconditioner
"""
class PITC(Preconditioner):
    """
    Construct preconditioning matrix
    X - Training data
    kern - Class of kernel function
    Xm - Inducing points
    """
    def __init__(self, X, kern, Xm):
        super(PITC, self).__init__("PITC")
        M = np.shape(Xm)[0]
        self.M = M
        start = time.time()
        # NOTE(review): the data is split into M groups, i.e. the number of
        # blocks equals the number of inducing points.
        X_split = np.array_split(X, M)
        self.kern = kern
        kern_blocks = np.zeros((M),dtype=object)
        for t in xrange(M):
            # Residual block: exact covariance of the group minus its
            # Nystrom approximation, plus likelihood noise on the diagonal.
            nyst = Nystrom(X_split[t], kern, Xm, False)
            size = np.shape(X_split[t])[0]
            kern_blocks[t] = kern.K(X_split[t], X_split[t]) - nyst.precon + (kern.noise)*np.identity(size)
        self.blocks = kern_blocks
        blocked = block_diag(*kern_blocks)
        # PITC = global Nystrom approximation + block-diagonal correction.
        self.nyst = Nystrom(X, kern, Xm, False)
        self.precon = self.nyst.precon + blocked
        self.duration = time.time() - start
    """
    Compute inversion of the preconditioner.
    """
    def get_inversion(self):
        invertedBlock = self.get_block_inversion()
        M = np.shape(self.nyst.Km)[0]
        # Square-root factor of Km^-1 with eigenvalues clamped at the jitter.
        eigs, eigv = np.linalg.eig(self.nyst.KmInv)
        for i in xrange(len(eigv)):
            if (eigs[i] < self.kern.jitter):
                eigs[i] = self.kern.jitter
            eigs[i] = np.sqrt(eigs[i])
        eigsD = np.diag(eigs)
        left = np.dot(self.nyst.Kxm, np.dot(eigv, eigsD))
        right = np.dot(eigsD, np.dot(eigv.T, self.nyst.Kxm.T))
        # Woodbury with the block-diagonal inverse playing the role of A^-1.
        return self.woodbury_inversion(invertedBlock, left, np.identity(M), right)
    """
    Implementation of Woodbury's matrix inversion lemma.
    """
    def woodbury_inversion(self, Ainv, U, Cinv, V):
        # (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1
        left_outer = np.dot(Ainv, U)
        right_outer = np.dot(V, Ainv)
        inner = np.linalg.inv(Cinv + np.dot(right_outer, U))
        return Ainv - np.dot(np.dot(left_outer, inner), right_outer)
    """
    Direct computation of (K^-1)b exploiting the matrix inversion lemma.
    """
    def inv_vec_prod(self, b):
        inverted_block = self.get_block_inversion()
        # P = blockdiag + Kxm Km^-1 Kxm^T, so A^-1 is the block inverse and
        # the Cinv argument (C^-1 with C = Km^-1) is Km itself.
        Ainv = inverted_block
        U = self.nyst.Kxm
        Cinv = self.nyst.Km
        V = self.nyst.Kxm.T
        right_outer = np.dot(V, np.dot(Ainv, b))
        inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
        left_outer = np.dot(Ainv, np.dot(U, np.dot(inner, right_outer)))
        return np.dot(Ainv, b) - left_outer
    """
    Invert block diagonal matrix block by block.
    """
    def get_block_inversion(self):
        diag_blocks = self.blocks
        inverted_blocks = np.zeros(len(diag_blocks), dtype=object)
        for i in xrange(len(diag_blocks)):
            inverted_blocks[i] = np.linalg.inv(diag_blocks[i])
        return block_diag(*inverted_blocks)
    """
    Inversion of preconditioner for Laplace Approximation.
    """
    def get_laplace_inversion(self, W, Wsqrt):
        inverted_block = self.get_laplace_block_inversion(Wsqrt)
        eigs, eigv = np.linalg.eig(self.nyst.KmInv)
        for i in xrange(len(eigv)):
            if (eigs[i] < self.kern.jitter):
                eigs[i] = self.kern.jitter
            eigs[i] = np.sqrt(eigs[i])
        eigsD = np.diag(eigs)
        # Low-rank factors scaled on both sides by diag(sqrt(W)).
        left = np.dot(np.diag(Wsqrt.flatten()), np.dot(self.nyst.Kxm, np.dot(eigv, eigsD)))
        right = np.dot(eigsD, np.dot(eigv.T, np.dot(self.nyst.Kxm.T, np.diag(Wsqrt.flatten()))))
        return self.woodbury_inversion(inverted_block, left, np.identity(self.M), right)
    def get_laplace_block_inversion(self, Wsqrt):
        diag_blocks = self.blocks
        Wsqrt_split = np.array_split(Wsqrt, self.M)
        inverted_blocks = np.zeros(len(diag_blocks), dtype=object)
        for i in xrange(len(diag_blocks)):
            # Scale each block by sqrt(W) on both sides, add I, then invert.
            Wblock = np.diag(Wsqrt_split[i].flatten())
            block = np.dot(Wblock, np.dot(diag_blocks[i], Wblock))
            inverted_blocks[i] = np.linalg.inv(block + np.identity(len(Wblock)))
        return block_diag(*inverted_blocks)
| 4,201 | 28.591549 | 98 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/preconditioner.py | """
Superclass for classes of Preconditioners.
"""
class Preconditioner(object):
    """Base class for all preconditioner implementations.

    Only stores the human-readable name of the preconditioning scheme
    (e.g. "Nystrom", "FITC"); subclasses add the actual matrices.
    """

    def __init__(self, name=""):
        # Identifier reported by subclasses via super().__init__(...).
        self.name = name
| 143 | 15 | 42 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/fitc.py | #This implementation is based on the article:
#
# @article{Quinonero-Candela:2005:UVS:1046920.1194909,
# author = {Qui\~{n}onero-Candela, Joaquin and Rasmussen, Carl Edward},
# title = {A Unifying View of Sparse Approximate Gaussian Process Regression},
# journal = {J. Mach. Learn. Res.},
# issue_date = {12/1/2005},
# volume = {6},
# month = dec,
# year = {2005},
# issn = {1532-4435},
# pages = {1939--1959},
# numpages = {21},
# url = {http://dl.acm.org/citation.cfm?id=1046920.1194909},
# acmid = {1194909},
# publisher = {JMLR.org},
# }
import numpy as np
import time
from preconditioner import Preconditioner
from nystrom import Nystrom
"""
Fully-independent Training Conditional (FITC) Preconditioner
"""
class FITC(Preconditioner):
    """
    Fully-independent Training Conditional (FITC) Preconditioner:
    Nystrom approximation plus a diagonal correction so the exact
    diagonal of the (noisy) kernel matrix is reproduced.

    Construct preconditioning matrix
    X - Training data
    kern - Class of kernel function
    Xm - Inducing points
    """
    def __init__(self, X, kern, Xm):
        super(FITC, self).__init__("FITC")
        M = np.shape(Xm)[0]
        N = np.shape(X)[0]
        self.kern = kern
        start = time.time()
        k = kern.K(X,X)
        self.nyst = Nystrom(X, kern, Xm, False)
        # Diagonal correction: exact (noisy) diagonal minus Nystrom diagonal.
        self.diag = np.diag(k - self.nyst.precon + (kern.noise)*np.identity(N))
        self.precon = self.nyst.precon + np.diag(self.diag)
        # Cache the Woodbury factors consumed by inv_vec_prod:
        # P = A + U C V with A = diag(self.diag), U = Kxm, C = Km^-1,
        # V = Kxm^T.  BUGFIX: these attributes were never initialised
        # before, so inv_vec_prod raised AttributeError on first use.
        self.Ainv = np.diag(1.0 / self.diag)
        self.leftU = self.nyst.Kxm
        self.Cinv = self.nyst.Km
        self.rightV = self.nyst.Kxm.T
        self.inner = None  # inner Woodbury matrix, computed lazily once
        self.duration = time.time() - start
    """
    Compute inversion of the preconditioner.
    """
    def get_inversion(self):
        inv = 1 / self.diag  # elementwise inverse of the diagonal part
        M = np.shape(self.nyst.Km)[0]
        # Square-root factor of Km^-1 with eigenvalues clamped at the jitter.
        eigs, eigv = np.linalg.eig(self.nyst.KmInv)
        for i in xrange(len(eigv)):
            if (eigs[i] < self.kern.jitter):
                eigs[i] = self.kern.jitter
            eigs[i] = np.sqrt(eigs[i])
        eigsD = np.diag(eigs)
        left = np.dot(self.nyst.Kxm, np.dot(eigv, eigsD))
        right = np.dot(eigsD, np.dot(eigv.T, self.nyst.Kxm.T))
        return self.woodbury_inversion(np.diag(inv), left, np.identity(M), right)
    """
    Implementation of Woodbury's matrix inversion lemma.
    """
    def woodbury_inversion(self, Ainv, U, Cinv, V):
        # (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1
        left_outer = np.dot(Ainv, U)
        right_outer = np.dot(V, Ainv)
        inner = np.linalg.inv(Cinv + np.dot(right_outer, U))
        return Ainv - np.dot(np.dot(left_outer, inner), right_outer)
    """
    Direct computation of (K^-1)b exploiting the matrix inversion lemma.
    """
    def inv_vec_prod(self, b):
        Ainv = self.Ainv
        U = self.leftU
        Cinv = self.Cinv
        V = self.rightV
        right_outer = np.dot(V, np.dot(Ainv, b))
        # The inner matrix does not depend on b; compute it once and cache.
        if (self.inner is None):
            self.inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
        left_outer = np.dot(Ainv, np.dot(U, np.dot(self.inner, right_outer)))
        return np.dot(Ainv, b) - left_outer
    """
    Inversion of preconditioner for Laplace Approximation.
    """
    def get_laplace_inversion(self, W, Wsqrt):
        M = np.shape(self.nyst.Km)[0]
        eigs, eigv = np.linalg.eig(self.nyst.KmInv)
        for i in xrange(len(eigv)):
            if (eigs[i] < self.kern.jitter):
                eigs[i] = self.kern.jitter
            eigs[i] = np.sqrt(eigs[i])
        eigsD = np.diag(eigs)
        # Low-rank factors scaled on both sides by diag(sqrt(W)).
        left = np.dot(np.diag(Wsqrt.flatten()), np.dot(self.nyst.Kxm, np.dot(eigv, eigsD)))
        right = np.dot(eigsD, np.dot(eigv.T, np.dot(self.nyst.Kxm.T, np.diag(Wsqrt.flatten()))))
        # Diagonal part of I + diag(sqrt(W)) * diag(self.diag) * diag(sqrt(W)).
        A = np.reshape(self.diag,(-1,1))*W + 1
        Ainv = 1/A
        return self.woodbury_inversion(np.diag(Ainv.flatten()), left, np.identity(M), right)
| 3,171 | 26.344828 | 90 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/spectral.py | #This implementation of spectral GP approximation is based on the article:
#
# @article{lazaro2010sparse,
# title={Sparse spectrum Gaussian process regression},
# author={L{\'a}zaro-Gredilla, Miguel and Qui{\~n}onero-Candela, Joaquin and Rasmussen, Carl Edward and Figueiras-Vidal, An{\'\i}bal R},
# journal={The Journal of Machine Learning Research},
# volume={11},
# pages={1865--1881},
# year={2010},
# publisher={JMLR. org}
# }
import numpy as np
import time
from preconditioner import Preconditioner
from nystrom import Nystrom
from ..util.ssgp import SsgpHelper
"""
Random Fourier Features (Spectral) Preconditioner
"""
class Spectral(Preconditioner):
    """
    Construct preconditioning matrix
    X - Training data
    Y - Target labels
    kern - Class of kernel function
    M - Number of Fourier features
    """
    def __init__(self, X, Y, kern, M):
        super(Spectral, self).__init__("Spectral")
        start = time.time()
        self.M = M
        self.kern = kern
        [N, D] = X.shape
        self.N = N
        # Optimise the spectral frequency points (sparse-spectrum GP,
        # Lazaro-Gredilla et al. 2010).
        ssgp_helper = SsgpHelper()
        S = ssgp_helper.optimize_frequency_points(X, kern, Y, M, D)
        W = np.reshape(S, (M, D), order='F')
        # Random-features design matrix: N x 2M of scaled cos/sin features.
        phi = 2*np.pi*np.dot(X, W.T)
        phi = np.sqrt(kern.variance/float(M))*np.hstack((np.cos(phi), np.sin(phi)))
        # Low-rank approximation phi phi^T plus likelihood noise.
        A = np.dot(phi, phi.T) + kern.noise*np.identity(N)
        self.precon = A
        self.Kxm = phi
        self.duration = time.time() - start
    """
    Compute inversion of the Preconditioner
    """
    def get_inversion(self):
        noise = self.kern.noise
        inv_noise = float(1) / noise
        noise_matrix = noise*np.identity(2*self.M)
        # Woodbury via the 2M x 2M inner system (cos and sin features);
        # overall 1/noise scaling applied outside.
        return inv_noise*self.woodbury_inversion(np.identity(self.N), self.Kxm, noise_matrix, self.Kxm.T)
    """
    Implementation of Woodbury's matrix inversion lemma.
    """
    def woodbury_inversion(self, Ainv, U, Cinv, V):
        # (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1
        left_outer = np.dot(Ainv, U)
        right_outer = np.dot(V, Ainv)
        inner = np.linalg.inv(Cinv + np.dot(right_outer, U))
        return Ainv - np.dot(np.dot(left_outer, inner), right_outer)
    """
    Inversion of preconditioner for Laplace Approximation.
    """
    def get_laplace_inversion(self, W, Wsqrt):
        return self.laplace_woodbury_inversion(self.Kxm, self.Kxm.T, W.flatten(), Wsqrt.flatten())
    def laplace_woodbury_inversion(self, U, V, W, Wsqrt):
        # Woodbury inverse of (I_N + diag(Wsqrt) U V diag(Wsqrt)) via the
        # 2M x 2M inner system.
        left_outer = np.dot(np.diag(Wsqrt), U)
        right_outer = np.dot(V, np.diag(Wsqrt))
        inner = np.linalg.inv(np.identity(2*self.M) + np.dot(V, np.dot(np.diag(W), U)))
        return np.identity(self.N) - np.dot(left_outer, np.dot(inner, right_outer))
| 2,446 | 28.481928 | 138 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/__init__.py | from preconditioner import Preconditioner
from blockJacobi import BlockJacobi
from nystrom import Nystrom
from svd import SVD
from kiss import Kiss
from pitc import PITC
from fitc import FITC
from spectral import Spectral | 221 | 26.75 | 41 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/blockJacobi.py | import numpy as np
from scipy.linalg import block_diag
from preconditioner import Preconditioner
import time
"""
Block Jacobi Preconditioner
"""
class BlockJacobi(Preconditioner):
    """
    Block-Jacobi preconditioner: the kernel matrix is approximated by the
    block-diagonal matrix built from M contiguous chunks of the data.

    X - Training data
    kern - Class of kernel function
    M - Number of diagonal blocks the data is split into
    """
    def __init__(self, X, kern, M):
        super(BlockJacobi, self).__init__("BlockJacobi")
        self.M = M
        start = time.time()
        chunks = np.array_split(X, M)
        blocks = np.zeros((M), dtype=object)
        for idx in xrange(M):
            chunk = chunks[idx]
            chunk_size = np.shape(chunk)[0]
            # Per-chunk kernel matrix with likelihood noise on the diagonal.
            blocks[idx] = kern.K(chunk, chunk) + kern.noise * np.identity(chunk_size)
        self.duration = time.time() - start
        self.blocks = blocks
        self.precon = block_diag(*blocks)
    """
    Compute inversion of the preconditioner.
    """
    def get_inversion(self):
        # Inverting block-by-block is equivalent to (and much cheaper than)
        # inverting the full block-diagonal matrix.
        inverted = [np.linalg.inv(blk) for blk in self.blocks]
        return block_diag(*inverted)
    """
    Compute inversion of preconditioner for Laplace Approximation.
    """
    def get_laplace_inversion(self, W, Wsqrt):
        Wsqrt_split = np.array_split(Wsqrt, self.M)
        inverted = []
        for blk, w in zip(self.blocks, Wsqrt_split):
            # Scale each block by sqrt(W) on both sides, add I, then invert.
            Wblock = np.diag(w.flatten())
            scaled = np.dot(Wblock, np.dot(blk, Wblock))
            inverted.append(np.linalg.inv(scaled + np.identity(len(Wblock))))
        return block_diag(*inverted)
| 1,826 | 28.467742 | 87 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/kernels/rbf.py | import numpy as np
from scipy.spatial.distance import cdist
from kernel import Kernel
"""
Implementation of isotropic RBF/SE kernel
"""
class RBF(Kernel):
    """Isotropic squared-exponential (RBF/SE) kernel."""
    def __init__(self, lengthscale=1, variance=1, noise=1):
        super(RBF, self).__init__("RBF")
        self.lengthscale = lengthscale
        self.variance = variance
        self.jitter = 1e-9
        # Noise is rescaled by the signal variance (normalisation strategy)
        # and padded with jitter for numerical stability.
        self.noise = noise / self.variance + self.jitter

    def K(self, X1, X2):
        """Squared-exponential covariance between the rows of X1 and X2."""
        dists = cdist(X1, X2, 'euclidean')
        scaled_sq = (dists ** 2) / self.lengthscale ** 2
        return self.variance * np.exp(-0.5 * scaled_sq)

    def K_scalar(self, X1, X2, original_dimensions):
        """Per-dimension scalar kernel for grid (Kronecker-structured) inputs.

        The signal variance is spread evenly over the original dimensions so
        that the Kronecker product of the per-dimension factors recovers it.
        """
        dists = cdist(X1, X2, 'euclidean')
        amplitude = self.variance ** (float(1) / original_dimensions)
        return amplitude * np.exp(-0.5 * dists ** 2 / self.lengthscale ** 2)
| 898 | 33.576923 | 123 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/kernels/matern32.py | import numpy as np
from scipy.spatial.distance import cdist
from kernel import Kernel
"""
Implementation of isotropic Matern-3/2 kernel
"""
class Matern32(Kernel):
def __init__(self, lengthscale=1, variance=1, noise=1):
super(Matern32, self).__init__("Matern 3/2")
self.lengthscale = lengthscale
self.variance = variance
self.noise = noise # adding jitter for numerical stability
def K(self, X1, X2):
""" GP matern-3/2 kernel """
pairwise_dists = cdist(X1, X2, 'euclidean')/self.lengthscale
return self.variance * (1. + np.sqrt(3.) * pairwise_dists) * np.exp(-np.sqrt(3.) * pairwise_dists)
def K_scalar(self, X1, X2, original_dimensions):
raise NotImplementedError | 745 | 31.434783 | 106 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/kernels/__init__.py | from kernel import Kernel
from rbf import RBF
from matern32 import Matern32 | 75 | 24.333333 | 29 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/kernels/kernel.py | """
Superclass for classes of Kernel functions.
"""
class Kernel(object):
    def __init__(self, name = ""):
        # Human-readable kernel name (e.g. "RBF", "Matern 3/2").
        self.name = name
    """
    Computation of Kernel matrix for the given inputs - Noise excluded
    """
    def K(self, X1, X2):
        # Subclasses return the covariance matrix between rows of X1 and X2.
        raise NotImplementedError
    """
    Computation of scalar Kernel matrix - for grid inputs
    """
    def K_scalar(self, X1, X2, original_dimensions):
        # Subclasses may implement a per-dimension factor for grid inputs.
        raise NotImplementedError | 399 | 19 | 67 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/__init__.py | import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import methods
import preconditioners
import kernels
def load(file_path):
    """
    Load a previously pickled model, using `m.pickle('path/to/file.pickle)'

    :param file_path: path/to/file.pickle
    """
    # Prefer the faster cPickle on Python 2; fall back to the pure-Python
    # module (and the only one available on Python 3).  Only an ImportError
    # triggers the fallback -- the previous bare except also swallowed
    # genuine unpickling errors and pointlessly retried them.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(file_path, 'rb') as f:
        m = pickle.load(f)
    return m
| 530 | 21.125 | 75 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/cg.py | import numpy as np
"""
Solve linear system using conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
init - Initial solution
threshold - Termination criteria
"""
class Cg(object):
    """
    Solve the linear system K x = Y with the (unpreconditioned) conjugate
    gradient method.

    K - Covariance matrix (assumed symmetric positive definite)
    Y - Target labels / right-hand side
    init - Initial solution (defaults to the zero column vector)
    threshold - Termination criterion on the squared residual norm
    """
    def __init__(self, K, Y, init=None, threshold=1e-9):
        n = np.shape(K)[0]
        x = np.zeros((n, 1)) if init is None else init
        self.K = K
        self.Y = Y.flatten()
        residual = Y - np.dot(K, x)  # r0 = b - K x0
        direction = residual
        steps = 0
        while True:
            # One K*p product per iteration, reused for both alpha and r.
            Kp = np.dot(K, direction)
            step = np.dot(residual.T, residual) / np.dot(direction.T, Kp)
            x = x + step * direction
            prev_sq = np.dot(residual.T, residual)
            residual = residual - step * Kp
            res_sq = np.dot(residual.T, residual)
            # Stop once the residual is small (scaled by n) or capped.
            if (res_sq.flatten() < (threshold * n)) or (steps > 15000):
                break
            direction = residual + (res_sq / prev_sq) * direction
            steps = steps + 1
        self.iterations = steps
        self.result = x
| 772 | 17.853659 | 63 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/laplaceCg.py | import numpy as np
from scipy.stats import norm
from cg import Cg
import random
"""
Laplace approximation using conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
init - Initial solution
threshold - Termination criteria for algorithm
"""
class LaplaceCg(object):
    """
    Laplace approximation for GP classification, with each Newton step's
    linear system solved by (unpreconditioned) conjugate gradient.

    K - Covariance matrix
    Y - Target labels (likelihood uses norm.logcdf(Y*f); presumably +/-1
        probit labels -- confirm against callers)
    init - Initial solution (NOTE(review): accepted but never used)
    threshold - Termination criterion
    """
    def __init__(self, K, Y, init=None, threshold=1e-9):
        N = np.shape(K)[0]
        f = np.zeros((N,1))  # latent function values
        converged = False
        k = 0  # number of Newton iterations
        innerC = 0  # total CG iterations across all Newton steps
        # Outer Newton loop, capped at N iterations.
        for i in xrange(N):
            # Curvature term W of the log-likelihood at the current f.
            pdfDiff = norm.logpdf(f) - norm.logcdf(Y*f)
            W = np.exp(2*pdfDiff) + Y*f*np.exp(pdfDiff)
            Wsqrt = np.sqrt(W)
            Wdiag= np.diag(Wsqrt.flatten())
            # B = I + W^1/2 K W^1/2, the symmetrised Newton system matrix.
            B = np.identity(N) + np.dot(Wdiag, np.dot(K, Wdiag))
            grad = Y*np.exp(pdfDiff)  # gradient of the log-likelihood
            b = W*f + grad
            interim = np.dot(Wdiag, np.dot(K, b))
            # Solve B s1 = W^1/2 K b with CG.
            cgRes = Cg(B, interim, threshold=threshold)
            s1 = cgRes.result
            innerC = innerC + cgRes.iterations
            a = b - Wsqrt*s1
            # Convergence was flagged on the previous pass: one extra Newton
            # solve is performed, then the loop exits before updating f.
            if(converged):
                break
            f_prev = f
            f = np.dot(K, a)
            diff = f - f_prev
            # Flag convergence on small change in f, or on the CG budget.
            if (np.dot(diff.T,diff).flatten() < threshold*N or innerC>15000):
                converged = True
            k = k+1
        self.result = f
        self.iterations = k + innerC
| 1,118 | 20.519231 | 68 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/regularPcg.py | import numpy as np
"""
Solve linear system using regular preconditioned conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
threshold - Termination criteria for outer loop
preconInv - Inversion of preconditioner Matrix
"""
class RegularPcg(object):
    """
    Solve K x = Y with preconditioned conjugate gradient.

    K - Covariance matrix (assumed symmetric positive definite)
    Y - Target labels / right-hand side
    P - Preconditioner matrix (may be None when preconInv is supplied)
    init - Initial solution (defaults to the zero column vector)
    threshold - Termination criterion on the squared residual norm
    preconInv - Pre-computed inverse of the preconditioner (optional)
    """
    def __init__(self, K, Y, P, init=None, threshold=1e-9, preconInv=None):
        n = np.shape(K)[0]
        x = np.zeros((n, 1)) if init is None else init
        Pinv = np.linalg.inv(P) if preconInv is None else preconInv
        self.K = K
        self.P = P
        self.Y = Y.flatten()
        residual = Y - np.dot(K, x)  # r0 = b - K x0
        z = np.dot(Pinv, residual)   # preconditioned residual
        direction = z
        count = 0
        while True:
            Kp = np.dot(K, direction)
            step = np.dot(residual.T, z) / np.dot(direction.T, Kp)
            x = x + step * direction
            prev_residual = residual
            residual = residual - step * Kp
            if np.dot(residual.T, residual).flatten() < threshold * n or count > 10000:
                break
            prev_z = z
            z = np.dot(Pinv, residual)
            beta = np.dot(z.T, residual) / np.dot(prev_z.T, prev_residual)
            direction = z + beta * direction
            count = count + 1
        self.iterations = count
        self.result = x
| 1,109 | 20.764706 | 72 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/flexPcg.py | import numpy as np
from cg import Cg
"""
Solve linear system using flexible conjugate gradient (without truncation)
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
threshold - Termination criteria for outer loop
innerThreshold - Termination criteria for inner loop
"""
class FlexiblePcg(object):
    """
    Flexible PCG without truncation: the preconditioner solve is itself an
    inner CG run, and each new direction is re-orthogonalised (in the
    K-inner product) against every previous direction.
    """
    def __init__(self, K, Y, P, init=None, threshold=1e-9, innerThreshold=1e-9):
        N = np.shape(K)[0]
        if init is None:
            init = np.zeros(N)
        self.K = K
        self.P = P
        self.Y = Y.flatten()
        x = init
        r_prev = np.zeros(N)
        r = self.Y - np.dot(self.K, x)
        # Storage for all previous search directions (hard cap of 6000).
        p = np.zeros(6000,dtype=object)
        k = 0
        innerC = 0  # total inner-CG iterations
        while True:
            diff = r - r_prev
            # NOTE(review): unlike the other solvers, this stopping rule is
            # not scaled by N and tests the *change* in residual.
            if (np.dot(diff.T,diff).flatten() < threshold) or k>5000:
                break
            # Approximately apply the preconditioner: solve P z = r with CG.
            interim = Cg(P, r, threshold=innerThreshold)
            z = interim.result
            count = interim.iterations
            innerC = innerC + count
            if (k == 0):
                p[k] = z
            else:
                # Gram-Schmidt against all previous directions (K-conjugacy).
                # NOTE(review): `sum` shadows the builtin.
                sum = 0
                for i in xrange(k):
                    frac = np.dot(z.T,np.dot(self.K,p[i]))/np.dot(p[i].T, np.dot(self.K, p[i]))
                    sum = sum + np.dot(frac, p[i])
                p[k] = z - sum
            alpha = np.dot(p[k].T, r) / np.dot(p[k].T, np.dot(self.K, p[k]))
            x = x + np.dot(alpha,p[k])
            r_prev = r
            r = r - np.dot(alpha, np.dot(K, p[k]))
            k = k + 1
        self.result = x
        self.iterations = innerC + k | 1,709 | 27.983051 | 95 | py |
self.iterations = innerC + k | 1,709 | 27.983051 | 95 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/kronCgDirect.py | import numpy as np
from ..util.kronHelper import KronHelper
from scipy import sparse
import time
"""
Solve linear system using conjugate gradient (intended for SKI inference)
Params:
K - Covariance Matrix
Ws - Sparse representation of weight matrix W
WTs - Sparse representation of transposed weight matrix W
Ku - Array of dimension-specific kernels
Y - Target labels
noise - Variance of the likelihood
init - Initial solution
threshold - Termination criteria for algorithm
"""
class KronCgDirect(object):
    """
    Solve linear system using conjugate gradient (intended for SKI inference).
    The matrix-vector product (W K_u W^T + noise*I) v is applied implicitly
    through the sparse interpolation matrices and the Kronecker product.

    K - Covariance matrix (NOTE: accepted but never used; products are
        formed implicitly from Ws/WTs/Ku)
    Ws - Sparse representation of weight matrix W
    WTs - Sparse representation of transposed weight matrix W
    Ku - Array of dimension-specific kernels
    Y - Target labels
    noise - Variance of the likelihood
    init - Initial solution
    threshold - Termination criteria for algorithm
    """
    def __init__(self, K, Ws, WTs, Ku, Y, noise, init=None, threshold=1e-9):
        N = len(Y)
        if init is None:
            init = np.zeros(N)
        self.Y = Y.flatten()
        x = init
        # PERF: the noise term is a scalar multiple of the vector, so
        # noise*x replaces the former O(N^2) product with noise*identity(N).
        prod = sparse.csr_matrix.dot(Ws, KronHelper().kron_mvprod(Ku, sparse.csr_matrix.dot(WTs, x))).flatten() + noise*x
        r = self.Y - prod #initialise residual gradient
        p = r
        t = 1
        while True:
            prod = sparse.csr_matrix.dot(Ws, KronHelper().kron_mvprod(Ku, sparse.csr_matrix.dot(WTs, p))).flatten() + noise*p
            alpha = np.dot(r.T, r) / np.dot(p.T, prod)
            x = x + np.dot(alpha, p)
            r_prev = r
            r = r - np.dot(alpha, prod)
            if (np.dot(r.T,r).flatten() < threshold*N or t>15000):
                break
            beta = np.dot(r.T, r) / np.dot(r_prev.T, r_prev)
            p = r + np.dot(beta, p)
            t = t + 1
        self.iterations = t
        self.result = x
| 1,384 | 29.108696 | 140 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/kronTruncFlexPcg.py | import numpy as np
from cg import Cg
from scipy import sparse
from kronCgDirect import KronCgDirect
"""
Solve linear system using truncated flexible conjugate gradient (intended for SKI inference)
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioning matrix
W - Weight matrix W
Ku - Array of dimension-specific kernels
kern - Kernel class
init - Initial solution
threshold - Termination criteria for algorithm
innerThreshold - Termination criteria for inner loop
"""
class KronTruncatedFlexiblePcg(object):
    """
    Truncated flexible PCG for SKI inference: the preconditioner solve uses
    KronCgDirect, and each new direction is re-orthogonalised against at
    most mMax previous directions.
    """
    def __init__(self, K, Y, P, W, Ku, kern=None, init=None, threshold=1e-9, innerThreshold=1e-9):
        mMax = 15  # truncation window for the direction history
        N = np.shape(K)[0]
        if init is None:
            init = np.zeros(N)
        self.K = K
        self.P = P
        self.Y = Y.flatten()
        x = init
        r_prev = np.zeros(N)  # NOTE(review): assigned but never read
        r = self.Y - np.dot(self.K, x)
        # Storage for previous search directions (hard cap of 6000).
        p = np.zeros(6000,dtype=object)
        k = 0
        # Pre-build the sparse interpolation matrices once.
        Ws = sparse.csr_matrix(W)
        WTs = sparse.csr_matrix(W.T)
        innerC = 0  # total inner-solver iterations
        while True:
            if (np.dot(r.T,r).flatten() < threshold*N or k>15000):
                break
            # Apply the preconditioner via the implicit Kronecker CG solver.
            # NOTE(review): kern defaults to None but kern.noise is always
            # dereferenced here -- kern is effectively a required argument.
            interim = KronCgDirect(P, Ws, WTs, Ku, r, kern.noise, threshold=innerThreshold)
            z = interim.result
            count = interim.iterations
            innerC = innerC + count
            if (k == 0):
                p[k] = z
            else:
                # Re-orthogonalise against the last m directions only.
                m = max(1, k % (mMax+1))
                sum = 0
                # NOTE(review): `start` is computed but unused; the loop
                # below uses (k-m) directly.  `sum` shadows the builtin.
                if (k-m < 0):
                    start = 0
                else:
                    start = k - m
                for i in xrange((k-m), k):
                    frac = np.dot(z.T,np.dot(self.K,p[i]))/np.dot(p[i].T, np.dot(self.K, p[i]))
                    sum = sum + np.dot(frac, p[i])
                p[k] = z - sum
            alpha = np.dot(p[k].T, r) / np.dot(p[k].T, np.dot(self.K, p[k]))
            x = x + np.dot(alpha,p[k])
            r_prev = r
            r = r - np.dot(alpha, np.dot(K, p[k]))
            k = k + 1
        self.outer_iterations = k
        self.result = x
        self.iterations = innerC + k | 2,176 | 28.418919 | 98 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/truncFlexPcg.py | import numpy as np
from cg import Cg
"""
Solve linear system using flexible conjugate gradient (with truncation)
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
threshold - Termination criteria for outer loop
innerThreshold - Termination criteria for inner loop
"""
class TruncatedFlexiblePcg(object):
    """
    Flexible PCG with truncation: the preconditioner solve is an inner CG
    run, and each new direction is re-orthogonalised against at most mMax
    previous directions.
    """
    def __init__(self, K, Y, P, init=None, threshold=1e-9, innerThreshold=1e-9):
        mMax = 15  # truncation window for the direction history
        N = np.shape(K)[0]
        if init is None:
            init = np.zeros((N,1))
        self.K = K
        self.P = P
        self.Y = Y.flatten()
        x = init
        r_prev = np.zeros((N,1))  # NOTE(review): assigned but never read
        r = Y - np.dot(self.K, x)
        # Storage for previous search directions (hard cap of 6000).
        p = np.zeros(6000,dtype=object)
        k = 0
        innerC = 0  # total inner-CG iterations
        while True:
            if (np.dot(r.T,r).flatten() < threshold*N or k>50000):
                break
            # Approximately apply the preconditioner: solve P z = r with CG.
            interim = Cg(P, r, threshold=innerThreshold)
            z = interim.result
            count = interim.iterations
            innerC = innerC + count
            if (k == 0):
                p[k] = z
            else:
                # Re-orthogonalise against the last m directions only.
                m = max(1, k % (mMax+1))
                sum = 0
                # NOTE(review): `start` is computed but unused; the loop
                # below uses (k-m) directly.  `sum` shadows the builtin.
                if (k-m < 0):
                    start = 0
                else:
                    start = k - m
                for i in xrange((k-m), k):
                    frac = np.dot(z.T,np.dot(self.K,p[i]))/np.dot(p[i].T, np.dot(self.K, p[i]))
                    sum = sum + frac* p[i]
                p[k] = z - sum
            alpha = np.dot(p[k].T, r) / np.dot(p[k].T, np.dot(self.K, p[k]))
            x = x + alpha*p[k]
            r_prev = r
            r = r - alpha*np.dot(K, p[k])
            k = k + 1
        self.outer_iterations = k
        self.result = x
        self.iterations = innerC + k | 1,869 | 27.333333 | 95 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/__init__.py | from cg import Cg
from regularPcg import RegularPcg
from flexPcg import FlexiblePcg
from truncFlexPcg import TruncatedFlexiblePcg
from kronCgDirect import KronCgDirect
from kronTruncFlexPcg import KronTruncatedFlexiblePcg
from laplaceCg import LaplaceCg
from laplacePcg import LaplacePcg | 287 | 35 | 53 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/methods/laplacePcg.py | import numpy as np
from scipy.stats import norm
from regularPcg import RegularPcg
import random
"""
Laplace approximation using preconditioned conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
threshold - Termination criteria for algorithm
"""
class LaplacePcg(object):
    """
    Laplace approximation for GP classification, with each Newton step's
    linear system solved by preconditioned conjugate gradient.

    K - Covariance matrix
    Y - Target labels (likelihood uses norm.logcdf(Y*f); presumably +/-1
        probit labels -- confirm against callers)
    P - Preconditioner object providing get_laplace_inversion(W, Wsqrt)
    init - Initial solution (NOTE(review): accepted but never used)
    threshold - Termination criterion
    precon - NOTE(review): accepted but never used; P is used instead
    """
    def __init__(self, K, Y, P, init=None, threshold=1e-9, precon=None):
        N = np.shape(K)[0]
        f = np.zeros((N,1))  # latent function values
        converged = False
        k = 0  # number of Newton iterations
        innerC = 0  # total PCG iterations across all Newton steps
        # Outer Newton loop, capped at N iterations.
        for i in xrange(N):
            # Curvature term W of the log-likelihood at the current f.
            pdfDiff = norm.logpdf(f) - norm.logcdf(Y*f)
            W = np.exp(2*pdfDiff) + Y*f*np.exp(pdfDiff)
            Wsqrt = np.sqrt(W)
            Wdiag= np.diag(Wsqrt.flatten())
            # B = I + W^1/2 K W^1/2, the symmetrised Newton system matrix.
            B = np.identity(N) + np.dot(Wdiag, np.dot(K, Wdiag))
            grad = Y*np.exp(pdfDiff)  # gradient of the log-likelihood
            b = W*f + grad
            interim = np.dot(Wdiag, np.dot(K, b))
            # Solve B s1 = W^1/2 K b with PCG, using the preconditioner's
            # dedicated Laplace inverse for the current W.
            pcgRes = RegularPcg(B, interim, None, threshold=threshold, preconInv=P.get_laplace_inversion(W,Wsqrt))
            s1 = pcgRes.result
            innerC = innerC + pcgRes.iterations
            a = b - Wsqrt*s1
            # Convergence was flagged on the previous pass: one extra Newton
            # solve is performed, then the loop exits before updating f.
            if(converged):
                break
            f_prev = f
            f = np.dot(K, a)
            diff = f - f_prev
            # Flag convergence on small change in f, or on the PCG budget.
            if (np.dot(diff.T,diff).flatten() < threshold*N or innerC>15000):
                converged = True
            k = k+1
        self.result = f
        self.iterations = k + innerC | 1,278 | 23.132075 | 105 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/util/inducingPointsHelper.py | #This implementation is based on the article:
# @inproceedings{snelson2005sparse,
# title={Sparse Gaussian processes using pseudo-inputs},
# author={Snelson, Edward and Ghahramani, Zoubin},
# booktitle={Advances in neural information processing systems},
# pages={1257--1264},
# year={2005}
# }
from __future__ import division
import numpy as np
from scipy.spatial.distance import cdist, squareform, pdist
from scipy.optimize import fmin_l_bfgs_b
from scipy.linalg import cholesky
import random as ran
import numpy.matlib,numpy.linalg,numpy.random
"""
Helper class for inducing point methods.
"""
class InducingPointsHelper(object):
def __init__(self, seed):
ran.seed(seed)
self.name = "InducingPointsHelper"
"""
Returns a random selection of points from the given dataset
X - Dataset
M - Number of points to be selected
"""
def get_random_inducing_points(self, X, M):
rand = ran.sample(range(0, X.shape[0]), M)
return X[rand]
"""
Procedure for optimizing the given inducing points
X - Dataset
Y - Target labels
M - Number of inducing points
kern - Class of kernel function
"""
def optimize_inducing_points(self, X, Y, M, kern):
dim = np.shape(X)[1]
hyp_init = np.ones((dim+2, 1))
for i in xrange(dim):
hyp_init[i] = kern.lengthscale
hyp_len = len(hyp_init)
hyp_init[hyp_len - 2] = kern.variance
hyp_init[hyp_len - 1] = kern.noise
rand = ran.sample(range(0, X.shape[0]), M)
I = X[rand]
W = np.vstack((np.reshape(I, (M*dim,1), order='F'), hyp_init))
res = fmin_l_bfgs_b(self.like_spgp, W, iprint=False, args=(X,Y,M))[0]
return np.reshape(res[0:M*dim], (M, dim), order='F')
def dist(self, x1, x2):
x1 = np.reshape(x1,(-1,1))
x2 = np.reshape(x2,(-1,1))
n1 = len(x1)
n2 = len(x2)
return np.matlib.repmat(x1,1,n2) - np.matlib.repmat(x2.T,n1,1)
"""
Procedure for evaluating likelihood of the inducing point approximation
W - Array of hyperparameters (incl. inducing points)
x - Dataset
y - Target labels
M - Number of inducing points
kern - Class of kernel function
"""
def like_spgp(self, W, x, y, M):
jitter = 1e-6
N = np.shape(x)[0]
dim = np.shape(x)[1]
length = len(W)
pts = W[0:(length-2-dim)]
xb = np.reshape(pts, (M, dim), order='F')
b = np.exp(W[(length-2-dim):(length-2)])
c = np.exp(W[length-2])
sig = np.exp(W[length-1])
xb = xb * np.matlib.repmat(np.sqrt(b).T, M, 1)
x = x * np.matlib.repmat(np.sqrt(b).T, N, 1)
Q = np.dot(xb, xb.T)
diag = np.reshape(np.diag(Q), (-1,1))
Q = np.matlib.repmat(diag,1,M) + np.matlib.repmat(diag.T, M, 1) - 2*Q
Q = c*np.exp(-0.5*Q) + jitter*np.identity(M)
x_sum = np.reshape(np.sum(x*x, axis=1), (-1,1))
xb_sum = np.reshape(np.sum(xb*xb, axis=1), (-1,1))
K = -2*np.dot(xb, x.T) + np.matlib.repmat(x_sum.T,M,1) + np.matlib.repmat(xb_sum,1,N)
K = c*np.exp(-0.5*K)
L = cholesky(Q,lower=False).T
V = np.linalg.solve(L,K)
vSum = np.reshape(np.sum(np.power(V,2), axis=0),(-1,1))
ep = 1 + (c - vSum.T)/sig
epSqrt = np.reshape(np.sqrt(ep), (-1, 1))
K = K / np.matlib.repmat(epSqrt.T,M,1)
V = V / np.matlib.repmat(epSqrt.T,M,1)
y = y / epSqrt
Lm = cholesky(sig*np.identity(M) + np.dot(V,V.T), lower=False).T
invLmV = np.linalg.solve(Lm,V)
bet = np.dot(invLmV, y)
# Likelihood
fw = np.sum(np.log(np.diag(Lm))) + (N-M)/2*np.log(sig) + (np.dot(y.T,y) - np.dot(bet.T,bet))/2/sig + np.sum(np.log(ep))/2 + 0.5*N*np.log(2*np.pi)
# Derivatives
Lt = np.dot(L,Lm)
B1 = np.linalg.solve(Lt.T, invLmV)
b1 = np.linalg.solve(Lt.T, bet)
invLV = np.linalg.solve(L.T, V)
invL = np.linalg.inv(L)
invQ = np.dot(invL.T, invL)
invLt = np.linalg.inv(Lt)
invA = np.dot(invLt.T,invLt)
mu = np.dot(np.linalg.solve(Lm.T,bet).T, V).T
sumV = np.reshape(np.sum(np.power(V, 2),axis=0),(-1,1))
sumVsq = sumV.T
sumB = np.reshape(np.sum(invLmV*invLmV,axis=0), (-1,1))
bigSum = y*np.dot(bet.T,invLmV).T/sig - sumB/2 - (np.power(y,2)+np.power(mu,2))/2/sig + 0.5
TT = np.dot(invLV, (invLV.T*np.matlib.repmat(bigSum,1,M)))
dfxb = np.zeros((M,dim))
dfb = np.zeros(dim)
for i in xrange(dim):
dnnQ = self.dist(xb[:,i],xb[:,i])*Q
dNnK = self.dist(-xb[:,i],-x[:,i])*K
epdot = -2/sig*dNnK*invLV
epPmod = -1*np.reshape(np.sum(epdot,axis=0),(-1,1))
sum1 = np.reshape(np.sum((invQ - invA*sig)*dnnQ,axis=1), (-1,1))
sum2 = np.reshape(np.sum(dnnQ*TT,axis=1), (-1,1))
dfxb[:,i] = (-b1*(np.dot(dNnK, (y-mu))/sig + np.dot(dnnQ,b1)) + sum1 + np.dot(epdot,bigSum) - 2/sig*sum2).flatten()
dNnK = dNnK*B1
dfxb[:,i] = dfxb[:,i] + np.sum(dNnK,axis=1)
dfxb[:,i] = dfxb[:,i] * np.sqrt(b[i])
dfc = (M + jitter*np.trace(invQ-sig*invA) - sig*sum2)/2 - np.dot(mu.T, (y-mu))/sig + np.dot(b1.T, np.dot((Q - np.dot(jitter, np.identity(M))), b1))/2 + np.dot(epc,bigSum)
#noise
dfsig = np.sum(bigSum / ep.T)
derivs = np.vstack((np.reshape(dfxb,(M*dim,1),order='F'),np.reshape(dfb[0].flatten(),(-1,1)),np.reshape(dfb[1].flatten(),(-1,1)),np.reshape(dfc,(-1,1)),np.reshape(dfsig,(-1,1)))).flatten()
return fw, derivs | 5,072 | 30.70625 | 190 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/util/kronHelper.py | #This implementation is based on the article:
#
# @article{gilboa2015scaling,
# title={Scaling multidimensional inference for structured Gaussian processes},
# author={Gilboa, Elad and Saat{\c{c}}i, Yunus and Cunningham, John P},
# journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
# volume={37},
# number={2},
# pages={424--436},
# year={2015},
# publisher={IEEE}
# }
import numpy as np
"""
Helper class for methods relying on Kronecker inference.
"""
class KronHelper(object):
def __init__(self):
self.name = "KronHelper"
"""
Compute array of covariance matrices per grid dimension
dimVector - Vector of inducing points per dimension
D - Number of dimensions
kern - Class of kernel function
"""
def kron_inference(self, dimVector, D, kern):
Kds = np.zeros(D, dtype=object) #vector for holding covariance per dimension
K_kron = 1 # kronecker product of eigenvalues
# retrieve the one-dimensional variation of the designated kernel
for d in xrange(D):
xg = dimVector[d]
xg = np.reshape(xg, (len(xg), 1))
Kds[d] = kern.K_scalar(xg, xg, D)
#K_kron = np.kron(K_kron, Kds[d])
return [K_kron, Kds]
"""
Fast matrix-vector multiplication for Kronecker matrices
A - Array of dimension-specific kernels
b - Vector being multiplied
"""
def kron_mvprod(self, A, b):
x = b
N = 1
D = len(A)
G = np.zeros((D,1))
for d in xrange(0, D):
G[d] = len(A[d])
N = np.prod(G)
for d in xrange(D-1, -1, -1):
X = np.reshape(x, (G[d], round(N/G[d])), order='F')
Z = np.dot(A[d], X)
Z = Z.T
x = np.reshape(Z, (-1, 1), order='F')
return x
| 1,634 | 24.546875 | 81 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/util/__init__.py | from kronHelper import KronHelper
from inducingPointsHelper import InducingPointsHelper
from ssgp import SsgpHelper | 115 | 37.666667 | 53 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/util/ssgp.py | #This implementation of spectral GP approximation is based on the article:
#
# @article{lazaro2010sparse,
# title={Sparse spectrum Gaussian process regression},
# author={L{\'a}zaro-Gredilla, Miguel and Qui{\~n}onero-Candela, Joaquin and Rasmussen, Carl Edward and Figueiras-Vidal, An{\'\i}bal R},
# journal={The Journal of Machine Learning Research},
# volume={11},
# pages={1865--1881},
# year={2010},
# publisher={JMLR. org}
# }
from __future__ import division
import numpy as np
import random as ran
from scipy.spatial.distance import cdist, squareform, pdist
from scipy.optimize import fmin_l_bfgs_b
from scipy.linalg import cholesky
import numpy.matlib,numpy.linalg
import math
"""
Helper class for methods based on Fourier features.
"""
class SsgpHelper(object):
def __init__(self):
self.name = "SsgpHelper"
"""
Evaluate likelihood for approximation usinf random fourier features.
"""
def ssgpr(self, X, kern, S, Y):
[N, D] = X.shape
m = len(S)/D
W = np.reshape(S, (m, D), order='F')
phi = np.dot(X, W.T)
phi = np.hstack((np.cos(phi), np.sin(phi)))
A = np.dot(phi.T, phi) + kern.noise*np.identity(2*m)
R = cholesky(A, lower=False)
PhiRi = np.linalg.lstsq(R.T, phi.T)[0] # PhiRi = phi/R
Rtphity = np.dot(PhiRi, Y.flatten())
return 0.5/kern.noise*(np.sum(np.power(Y,2))-kern.noise/m*np.sum(np.power(Rtphity,2))) + np.sum(np.log(np.diag(R))) + (N/2 - m)*np.log(kern.noise)+N/2*np.log(2*np.pi)
"""
Optimize random selection of frequency points by taking the set which maximises the likelihood
over a series of iterations.
"""
def optimize_frequency_points(self, X, kern, Y, M, D):
nlml = np.inf
for k in xrange(5):
#S = np.random.randn(M*D)
S = np.random.multivariate_normal(np.zeros(D), (1/(4*np.pi**2)*(1/kern.lengthscale**2)*np.identity(D)), M).flatten()
#S = np.random.normal(0, 1/(4*np.pi**2*kern.lengthscale**2), M*D)
nlmlc = self.ssgpr(X, kern, S, Y)
if nlmlc<nlml:
S_save = S
nlml = nlmlc
return S_save
| 1,999 | 30.25 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/svd.py | import numpy as np
import time
from sklearn.utils.extmath import randomized_svd
from preconditioner import Preconditioner
"""
Randomized Singular Value Decomposition (SVD) Preconditioner
"""
class SVD(Preconditioner):
"""
Construct preconditioning matrix
X - Training data
kern - Class of kernel function
M - Rank of the decomposition
"""
def __init__(self, X, kern, M):
super(SVD, self).__init__("SVD")
start = time.time()
self.X = X
self.kern = kern
K = kern.K(X, X)
N = np.shape(X)[0]
#(self.U, self.Sigma, self.VT) = fb.pca(K, M)#, n_iter=1, l=M)
self.U, self.Sigma, self.VT = randomized_svd(K, M)
self.precon = np.dot(self.U, np.dot(np.diag(self.Sigma), self.VT)) + self.kern.noise*np.identity(N)
self.duration = time.time() - start
"""
Compute inversion of the preconditioner.
"""
def get_inversion(self):
N = np.shape(self.X)[0]
M = np.shape(self.Sigma)[0]
noise = self.kern.noise
inv_noise = float(1) / noise
noise_matrix = noise*np.identity(M)
# eigs, eigv = np.linalg.eig(np.diag(self.Sigma))
# for i in xrange(len(eigv)):
# if (eigs[i] < self.kern.jitter):
# eigs[i] = self.kern.jitter
# eigs[i] = np.sqrt(eigs[i])
eigs = np.sqrt(self.Sigma)
eigsD = np.diag(eigs)
left = np.dot(self.U, eigsD)
right = np.dot(eigsD, self.VT)
return inv_noise*self.woodbury_inversion(np.identity(N), left, noise_matrix, right)
"""
Implementation of Woodbury's matrix inversion lemma.
"""
def woodbury_inversion(self, Ainv, U, Cinv, V):
left_outer = np.dot(Ainv, U)
right_outer = np.dot(V, Ainv)
inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
return Ainv - np.dot(left_outer, np.dot(inner, right_outer))
"""
Direct computation of (K^-1)b exploiting the matrix inversion lemma.
"""
def inv_vec_prod(self, b):
noise = self.kern.noise
inv_noise = float(1) / noise
inv_noise_matrix = inv_noise*np.identity(np.shape(self.X)[0])
inv_sigma = np.diag(1 / self.Sigma)
Ainv = inv_noise_matrix
U = self.U
Cinv = inv_sigma
V = self.VT
right_outer = np.dot(V, np.dot(Ainv, b))
inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
left_outer = np.dot(Ainv, np.dot(U, np.dot(inner, right_outer)))
return np.dot(Ainv, b) - left_outer
"""
Inversion of preconditioner for Laplace Approximation.
"""
def get_laplace_inversion(self, W , Wsqrt):
self.N = np.shape(self.X)[0]
self.M = np.shape(self.Sigma)[0]
eigs = np.sqrt(self.Sigma)
eigsD = np.diag(eigs)
left = np.dot(self.U, eigsD)
right = np.dot(eigsD, self.VT)
return self.laplace_woodbury_inversion(left, right, W.flatten(), Wsqrt.flatten())
def laplace_woodbury_inversion(self, U, V, W, Wsqrt):
left_outer = np.dot(np.diag(Wsqrt), U)
right_outer = np.dot(V, np.diag(Wsqrt))
inner = np.linalg.inv(np.identity(self.M) + np.dot(V, np.dot(np.diag(W), U)))
return np.identity(self.N) - np.dot(left_outer, np.dot(inner, right_outer))
| 2,924 | 27.676471 | 101 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/kiss.py | #This implementation of Structured Kernel Interpolation is based on the article:
#
# @inproceedings{DBLP:conf/icml/WilsonN15,
# author = {Andrew Gordon Wilson and
# Hannes Nickisch},
# title = {Kernel Interpolation for Scalable Structured Gaussian Processes {(KISS-GP)}},
# booktitle = {Proceedings of the 32nd International Conference on Machine Learning,
# {ICML} 2015, Lille, France, 6-11 July 2015},
# pages = {1775--1784},
# year = {2015},
# crossref = {DBLP:conf/icml/2015},
# url = {http://jmlr.org/proceedings/papers/v37/wilson15.html},
# timestamp = {Sun, 05 Jul 2015 19:10:23 +0200},
# biburl = {http://dblp.uni-trier.de/rec/bib/conf/icml/WilsonN15},
# bibsource = {dblp computer science bibliography, http://dblp.org}
# }
import numpy as np
import time
from preconditioner import Preconditioner
from ..util.kronHelper import KronHelper
import math
"""
SKI Preconditioner
"""
class Kiss(Preconditioner):
"""
Construct preconditioning matrix
X - Training data
kern - Class of kernel function
"""
def __init__(self, X, kern):
super(Kiss, self).__init__("Kiss")
self.X = X
self.kern = kern
start = time.time()
Xnew = self.normalize_columns(X)
N = Xnew.shape[0]
D = Xnew.shape[1]
num_grid_interval = np.zeros((D))
maximum = np.zeros(D)
minimum = np.zeros(D)
for i in xrange(D):
maximum[i] = max(X[:,i])
minimum[i] = min(X[:,i])
num_grid_interval[i] = round(N**(float(3)/float(2*D)))#round((N**2)**(float(1)/D))
if (num_grid_interval[i] == 1):
num_grid_interval[i] = 2
# construct grid vectors and intervals
interval = np.zeros(D)
vector = np.zeros(D, dtype=object)
for i in xrange(D):
[vector[i],interval[i]] = np.linspace(0, 1, num=num_grid_interval[i], retstep=True)
for i in xrange(D):
num_grid_interval[i] = len(vector[i])
interval_matrix = np.zeros((N, D))
assign = np.zeros(N)
for i in xrange(D):
for j in xrange(N):
interval_matrix[j][i] = self.get_rounded_threshold(Xnew[j][i], interval[i], len(vector[i]), 0, 1)
# construct weight matrix
for j in xrange(N):
val =0
for t in xrange(D):
val = val + interval_matrix[j][t]*np.prod(num_grid_interval[t+1:D])
assign[j] = val
W = np.zeros((N,np.prod(num_grid_interval)))
for i in xrange(N):
index = assign[i]
W[i][index] = 1
kron_helper = KronHelper()
unnormalzed_vector = self.reverse_normalize(vector, minimum, maximum)
[K, Kds] = kron_helper.kron_inference(unnormalzed_vector, D, kern)
#Kski = np.dot(np.dot(W, K), W.T)
self.W = W
self.Ku = Kds
self.precon = None
self.duration = time.time() - start
"""
Normalize the given training data
"""
def normalize_columns(self, array):
arr = array.copy()
rows, cols = arr.shape
for col in xrange(cols):
maxim = arr[:,col].max()
minim = arr[:,col].min()
arr[:,col] = (arr[:,col] - minim) / (maxim - minim)
return arr
"""
Reverse the normalization carried out on the data
"""
def reverse_normalize(self, array, minimum, maximum):
new_array = np.zeros(len(array), dtype=object)
for i in xrange(len(array)):
new_array[i] = array[i]*(maximum[i] - minimum[i]) + minimum[i]
return new_array
"""
Assign points to designated nearest location in the grid
"""
def get_rounded_threshold(self, a, min_clip, max_interval, minim, maxim):
interval = round(float(a) / min_clip)
rounded_val = interval * min_clip
if (rounded_val > maxim):
return max_interval
if (rounded_val < minim):
return 0
return interval
| 4,131 | 30.784615 | 113 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/nystrom.py | #This implementation is based on the article:
#
# @article{Quinonero-Candela:2005:UVS:1046920.1194909,
# author = {Qui\~{n}onero-Candela, Joaquin and Rasmussen, Carl Edward},
# title = {A Unifying View of Sparse Approximate Gaussian Process Regression},
# journal = {J. Mach. Learn. Res.},
# issue_date = {12/1/2005},
# volume = {6},
# month = dec,
# year = {2005},
# issn = {1532-4435},
# pages = {1939--1959},
# numpages = {21},
# url = {http://dl.acm.org/citation.cfm?id=1046920.1194909},
# acmid = {1194909},
# publisher = {JMLR.org},
# }
import numpy as np
from preconditioner import Preconditioner
import time
"""
Nystrom Preconditioner
"""
class Nystrom(Preconditioner):
"""
Construct preconditioning matrix
X - Training data
kern - Class of kernel function
Xm - Inducing points
addNoise - Flag indicating whether to add likelihood variance to kernel matrix
"""
def __init__(self, X, kern, Xm, addNoise=True):
super(Nystrom, self).__init__("Nystrom")
start = time.time()
self.kern = kern
self.X = X
N = np.shape(X)[0]
M = np.shape(Xm)[0]
self.M = M
self.N = N
Kxm = kern.K(X, Xm)
Km = kern.K(Xm, Xm)
self.Kxm = Kxm
self.Km = Km + 1e-6*np.identity(M) # jitter
self.KmInv = np.linalg.inv(self.Km)
if addNoise:
self.precon = np.dot(np.dot(Kxm,self.KmInv),Kxm.T) + self.kern.noise*np.identity(N)
else:
self.precon = np.dot(np.dot(Kxm,self.KmInv),Kxm.T)
self.duration = time.time() - start
"""
Compute inversion of the preconditioner.
"""
def get_inversion(self):
N = np.shape(self.X)[0]
M = np.shape(self.Km)[0]
noise = self.kern.noise
inv_noise = float(1) / noise
noise_matrix = noise*np.identity(M)
eigs, eigv = np.linalg.eig(self.KmInv)
for i in xrange(len(eigv)):
if (eigs[i] < self.kern.jitter):
eigs[i] = self.kern.jitter
eigs[i] = np.sqrt(eigs[i])
eigsD = np.diag(eigs)
left = np.dot(self.Kxm, np.dot(eigv, eigsD))
right = np.dot(eigsD, np.dot(eigv.T, self.Kxm.T))
return inv_noise*self.woodbury_inversion(np.identity(N), left, noise_matrix, right)
"""
Implementation of Woodbury's matrix inversion lemma.
"""
def woodbury_inversion(self, Ainv, U, Cinv, V):
left_outer = np.dot(Ainv, U)
right_outer = np.dot(V, Ainv)
inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
return Ainv - np.dot(left_outer, np.dot(inner, right_outer))
"""
Direct computation of (K^-1)b exploiting the matrix inversion lemma.
"""
def inv_vec_prod(self, b):
noise = self.kern.noise
inv_noise = float(1) / noise
inv_noise_matrix = inv_noise*np.identity(np.shape(self.X)[0])
Ainv = inv_noise_matrix
U = self.Kxm
Cinv = self.Km
V = self.Kxm.T
right_outer = np.dot(V, np.dot(Ainv, b))
inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
left_outer = np.dot(Ainv, np.dot(U, np.dot(inner, right_outer)))
return np.dot(Ainv, b) - left_outer
"""
Inversion of preconditioner for Laplace Approximation.
"""
def get_laplace_inversion(self, W, Wsqrt):
eigs, eigv = np.linalg.eig(self.KmInv)
for i in xrange(len(eigv)):
if (eigs[i] < self.kern.jitter):
eigs[i] = self.kern.jitter
eigs[i] = np.sqrt(eigs[i])
eigsD = np.diag(eigs)
left = np.dot(self.Kxm, np.dot(eigv, eigsD))
right = np.dot(eigsD, np.dot(eigv.T, self.Kxm.T))
return self.laplace_woodbury_inversion(left, right, W.flatten(), Wsqrt.flatten())
def laplace_woodbury_inversion(self, U, V, W, Wsqrt):
left_outer = np.dot(np.diag(Wsqrt), U)
right_outer = np.dot(V, np.diag(Wsqrt))
inner = np.linalg.inv(np.identity(self.M) + np.dot(V, np.dot(np.diag(W), U)))
return np.identity(self.N) - np.dot(left_outer, np.dot(inner, right_outer))
| 3,672 | 26.616541 | 86 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/pitc.py | #This implementation is based on the article:
#
# @article{Quinonero-Candela:2005:UVS:1046920.1194909,
# author = {Qui\~{n}onero-Candela, Joaquin and Rasmussen, Carl Edward},
# title = {A Unifying View of Sparse Approximate Gaussian Process Regression},
# journal = {J. Mach. Learn. Res.},
# issue_date = {12/1/2005},
# volume = {6},
# month = dec,
# year = {2005},
# issn = {1532-4435},
# pages = {1939--1959},
# numpages = {21},
# url = {http://dl.acm.org/citation.cfm?id=1046920.1194909},
# acmid = {1194909},
# publisher = {JMLR.org},
# }
import numpy as np
import time
from preconditioner import Preconditioner
from nystrom import Nystrom
from scipy.linalg import block_diag
"""
Partially-independent Training Conditional (FITC) Preconditioner
"""
class PITC(Preconditioner):
"""
Construct preconditioning matrix
X - Training data
kern - Class of kernel function
Xm - Inducing points
"""
def __init__(self, X, kern, Xm):
super(PITC, self).__init__("PITC")
M = np.shape(Xm)[0]
self.M = M
start = time.time()
X_split = np.array_split(X, M)
self.kern = kern
kern_blocks = np.zeros((M),dtype=object)
for t in xrange(M):
nyst = Nystrom(X_split[t], kern, Xm, False)
size = np.shape(X_split[t])[0]
kern_blocks[t] = kern.K(X_split[t], X_split[t]) - nyst.precon + (kern.noise)*np.identity(size)
self.blocks = kern_blocks
blocked = block_diag(*kern_blocks)
self.nyst = Nystrom(X, kern, Xm, False)
self.precon = self.nyst.precon + blocked
self.duration = time.time() - start
"""
Compute inversion of the preconditioner.
"""
def get_inversion(self):
invertedBlock = self.get_block_inversion()
M = np.shape(self.nyst.Km)[0]
eigs, eigv = np.linalg.eig(self.nyst.KmInv)
for i in xrange(len(eigv)):
if (eigs[i] < self.kern.jitter):
eigs[i] = self.kern.jitter
eigs[i] = np.sqrt(eigs[i])
eigsD = np.diag(eigs)
left = np.dot(self.nyst.Kxm, np.dot(eigv, eigsD))
right = np.dot(eigsD, np.dot(eigv.T, self.nyst.Kxm.T))
return self.woodbury_inversion(invertedBlock, left, np.identity(M), right)
"""
Implementation of Woodbury's matrix inversion lemma.
"""
def woodbury_inversion(self, Ainv, U, Cinv, V):
left_outer = np.dot(Ainv, U)
right_outer = np.dot(V, Ainv)
inner = np.linalg.inv(Cinv + np.dot(right_outer, U))
return Ainv - np.dot(np.dot(left_outer, inner), right_outer)
"""
Direct computation of (K^-1)b exploiting the matrix inversion lemma.
"""
def inv_vec_prod(self, b):
inverted_block = self.get_block_inversion()
Ainv = inverted_block
U = self.nyst.Kxm
Cinv = self.nyst.Km
V = self.nyst.Kxm.T
right_outer = np.dot(V, np.dot(Ainv, b))
inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
left_outer = np.dot(Ainv, np.dot(U, np.dot(inner, right_outer)))
return np.dot(Ainv, b) - left_outer
"""
Invert block diagonal matrix block by block.
"""
def get_block_inversion(self):
diag_blocks = self.blocks
inverted_blocks = np.zeros(len(diag_blocks), dtype=object)
for i in xrange(len(diag_blocks)):
inverted_blocks[i] = np.linalg.inv(diag_blocks[i])
return block_diag(*inverted_blocks)
"""
Inversion of preconditioner for Laplace Approximation.
"""
def get_laplace_inversion(self, W, Wsqrt):
inverted_block = self.get_laplace_block_inversion(Wsqrt)
eigs, eigv = np.linalg.eig(self.nyst.KmInv)
for i in xrange(len(eigv)):
if (eigs[i] < self.kern.jitter):
eigs[i] = self.kern.jitter
eigs[i] = np.sqrt(eigs[i])
eigsD = np.diag(eigs)
left = np.dot(np.diag(Wsqrt.flatten()), np.dot(self.nyst.Kxm, np.dot(eigv, eigsD)))
right = np.dot(eigsD, np.dot(eigv.T, np.dot(self.nyst.Kxm.T, np.diag(Wsqrt.flatten()))))
return self.woodbury_inversion(inverted_block, left, np.identity(self.M), right)
def get_laplace_block_inversion(self, Wsqrt):
diag_blocks = self.blocks
Wsqrt_split = np.array_split(Wsqrt, self.M)
inverted_blocks = np.zeros(len(diag_blocks), dtype=object)
for i in xrange(len(diag_blocks)):
Wblock = np.diag(Wsqrt_split[i].flatten())
block = np.dot(Wblock, np.dot(diag_blocks[i], Wblock))
inverted_blocks[i] = np.linalg.inv(block + np.identity(len(Wblock)))
return block_diag(*inverted_blocks)
| 4,201 | 28.591549 | 98 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/preconditioner.py | """
Superclass for classes of Preconditioners.
"""
class Preconditioner(object):
def __init__(self, name = ""):
self.name = name
| 143 | 15 | 42 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/fitc.py | #This implementation is based on the article:
#
# @article{Quinonero-Candela:2005:UVS:1046920.1194909,
# author = {Qui\~{n}onero-Candela, Joaquin and Rasmussen, Carl Edward},
# title = {A Unifying View of Sparse Approximate Gaussian Process Regression},
# journal = {J. Mach. Learn. Res.},
# issue_date = {12/1/2005},
# volume = {6},
# month = dec,
# year = {2005},
# issn = {1532-4435},
# pages = {1939--1959},
# numpages = {21},
# url = {http://dl.acm.org/citation.cfm?id=1046920.1194909},
# acmid = {1194909},
# publisher = {JMLR.org},
# }
import numpy as np
import time
from preconditioner import Preconditioner
from nystrom import Nystrom
"""
Fully-independent Training Conditional (FITC) Preconditioner
"""
class FITC(Preconditioner):
"""
Construct preconditioning matrix
X - Training data
kern - Class of kernel function
Xm - Inducing points
"""
def __init__(self, X, kern, Xm):
super(FITC, self).__init__("FITC")
M = np.shape(Xm)[0]
N = np.shape(X)[0]
self.kern = kern
start = time.time()
k = kern.K(X,X)
self.nyst = Nystrom(X, kern, Xm, False)
self.diag = np.diag(k - self.nyst.precon + (kern.noise)*np.identity(N))
self.precon = self.nyst.precon + np.diag(self.diag)
self.duration = time.time() - start
"""
Compute inversion of the preconditioner.
"""
def get_inversion(self):
inv = 1 / self.diag
M = np.shape(self.nyst.Km)[0]
eigs, eigv = np.linalg.eig(self.nyst.KmInv)
for i in xrange(len(eigv)):
if (eigs[i] < self.kern.jitter):
eigs[i] = self.kern.jitter
eigs[i] = np.sqrt(eigs[i])
eigsD = np.diag(eigs)
left = np.dot(self.nyst.Kxm, np.dot(eigv, eigsD))
right = np.dot(eigsD, np.dot(eigv.T, self.nyst.Kxm.T))
return self.woodbury_inversion(np.diag(inv), left, np.identity(M), right)
"""
Implementation of Woodbury's matrix inversion lemma.
"""
def woodbury_inversion(self, Ainv, U, Cinv, V):
left_outer = np.dot(Ainv, U)
right_outer = np.dot(V, Ainv)
inner = np.linalg.inv(Cinv + np.dot(right_outer, U))
return Ainv - np.dot(np.dot(left_outer, inner), right_outer)
"""
Direct computation of (K^-1)b exploiting the matrix inversion lemma.
"""
def inv_vec_prod(self, b):
Ainv = self.Ainv
U = self.leftU
Cinv = self.Cinv
V = self.rightV
right_outer = np.dot(V, np.dot(Ainv, b))
if (self.inner is None):
self.inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
left_outer = np.dot(Ainv, np.dot(U, np.dot(self.inner, right_outer)))
return np.dot(Ainv, b) - left_outer
"""
Inversion of preconditioner for Laplace Approximation.
"""
def get_laplace_inversion(self, W, Wsqrt):
M = np.shape(self.nyst.Km)[0]
eigs, eigv = np.linalg.eig(self.nyst.KmInv)
for i in xrange(len(eigv)):
if (eigs[i] < self.kern.jitter):
eigs[i] = self.kern.jitter
eigs[i] = np.sqrt(eigs[i])
eigsD = np.diag(eigs)
left = np.dot(np.diag(Wsqrt.flatten()), np.dot(self.nyst.Kxm, np.dot(eigv, eigsD)))
right = np.dot(eigsD, np.dot(eigv.T, np.dot(self.nyst.Kxm.T, np.diag(Wsqrt.flatten()))))
A = np.reshape(self.diag,(-1,1))*W + 1
Ainv = 1/A
return self.woodbury_inversion(np.diag(Ainv.flatten()), left, np.identity(M), right)
| 3,171 | 26.344828 | 90 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/spectral.py | #This implementation of spectral GP approximation is based on the article:
#
# @article{lazaro2010sparse,
# title={Sparse spectrum Gaussian process regression},
# author={L{\'a}zaro-Gredilla, Miguel and Qui{\~n}onero-Candela, Joaquin and Rasmussen, Carl Edward and Figueiras-Vidal, An{\'\i}bal R},
# journal={The Journal of Machine Learning Research},
# volume={11},
# pages={1865--1881},
# year={2010},
# publisher={JMLR. org}
# }
import numpy as np
import time
from preconditioner import Preconditioner
from nystrom import Nystrom
from ..util.ssgp import SsgpHelper
"""
Random Fourier Features (Spectral) Preconditioner
"""
class Spectral(Preconditioner):
"""
Construct preconditioning matrix
X - Training data
Y - Target labels
kern - Class of kernel function
M - Number of Fourier features
"""
def __init__(self, X, Y, kern, M):
super(Spectral, self).__init__("Spectral")
start = time.time()
self.M = M
self.kern = kern
[N, D] = X.shape
self.N = N
ssgp_helper = SsgpHelper()
S = ssgp_helper.optimize_frequency_points(X, kern, Y, M, D)
W = np.reshape(S, (M, D), order='F')
phi = 2*np.pi*np.dot(X, W.T)
phi = np.sqrt(kern.variance/float(M))*np.hstack((np.cos(phi), np.sin(phi)))
A = np.dot(phi, phi.T) + kern.noise*np.identity(N)
self.precon = A
self.Kxm = phi
self.duration = time.time() - start
"""
Compute inversion of the Preconditioner
"""
def get_inversion(self):
noise = self.kern.noise
inv_noise = float(1) / noise
noise_matrix = noise*np.identity(2*self.M)
return inv_noise*self.woodbury_inversion(np.identity(self.N), self.Kxm, noise_matrix, self.Kxm.T)
"""
Implementation of Woodbury's matrix inversion lemma.
"""
def woodbury_inversion(self, Ainv, U, Cinv, V):
left_outer = np.dot(Ainv, U)
right_outer = np.dot(V, Ainv)
inner = np.linalg.inv(Cinv + np.dot(right_outer, U))
return Ainv - np.dot(np.dot(left_outer, inner), right_outer)
"""
Inversion of preconditioner for Laplace Approximation.
"""
def get_laplace_inversion(self, W, Wsqrt):
return self.laplace_woodbury_inversion(self.Kxm, self.Kxm.T, W.flatten(), Wsqrt.flatten())
def laplace_woodbury_inversion(self, U, V, W, Wsqrt):
left_outer = np.dot(np.diag(Wsqrt), U)
right_outer = np.dot(V, np.diag(Wsqrt))
inner = np.linalg.inv(np.identity(2*self.M) + np.dot(V, np.dot(np.diag(W), U)))
return np.identity(self.N) - np.dot(left_outer, np.dot(inner, right_outer))
| 2,446 | 28.481928 | 138 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/__init__.py | from preconditioner import Preconditioner
from blockJacobi import BlockJacobi
from nystrom import Nystrom
from svd import SVD
from kiss import Kiss
from pitc import PITC
from fitc import FITC
from spectral import Spectral | 221 | 26.75 | 41 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/blockJacobi.py | import numpy as np
from scipy.linalg import block_diag
from preconditioner import Preconditioner
import time
"""
Block Jacobi Preconditioner
"""
class BlockJacobi(Preconditioner):
"""
Construct preconditioner
X - Training data
kern - Class of kernel function
M - Number of points ber block
"""
def __init__(self, X, kern, M):
super(BlockJacobi, self).__init__("BlockJacobi")
self.M = M
start = time.time()
X_split = np.array_split(X, M)
kern_blocks = np.zeros((M),dtype=object)
for t in xrange(M):
size = np.shape(X_split[t])[0]
kern_blocks[t] = kern.K(X_split[t], X_split[t]) + kern.noise*np.identity(size)
self.duration = time.time()-start
self.blocks = kern_blocks
self.precon = block_diag(*kern_blocks)
"""
Compute inversion of the preconditioner.
"""
def get_inversion(self):
diag_blocks = self.blocks
inverted_blocks = np.zeros(len(diag_blocks), dtype=object)
for i in xrange(len(diag_blocks)):
inverted_blocks[i] = np.linalg.inv(diag_blocks[i])
inverted_diag = block_diag(*inverted_blocks)
return inverted_diag
"""
Compute inversion of preconditioner for Laplace Approximation.
"""
def get_laplace_inversion(self, W, Wsqrt):
Wsqrt_split = np.array_split(Wsqrt, self.M)
diag_blocks = self.blocks
inverted_blocks = np.zeros(len(diag_blocks), dtype=object)
for i in xrange(len(diag_blocks)):
Wblock = np.diag(Wsqrt_split[i].flatten())
block = np.dot(Wblock, np.dot(diag_blocks[i], Wblock))
inverted_blocks[i] = np.linalg.inv(block + np.identity(len(Wblock)))
inverted_diag = block_diag(*inverted_blocks)
return inverted_diag
| 1,826 | 28.467742 | 87 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/preconditioners/ilu.py | import numpy as np
from preconditioner import Preconditioner
import time
class ILU(Preconditioner):
def __init__(self, X, kern):
super(ILU, self).__init__("ILU")
start = time.time()
K = kern.K(X,X)
N = np.shape(K)[0]
A = np.copy(K)
for k in xrange(N):
A[k][k] = np.sqrt(K[k][k])
for i in xrange(k+1,N):
if (A[i][k] != 0):
A[i][k] = A[i][k] / A[k][k]
for j in xrange(k+1,N):
for i in xrange(j,N):
if (A[i][j] != 0):
A[i][j] = A[i][j] - A[i][k]*A[j][k]
for i in xrange(N):
for j in xrange(i+1,N):
A[i][j] = 0
self.duration = time.time() - start
#self.precon = np.dot(A, A.conj().T)
self.L = A
| 732 | 20.558824 | 44 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/kernels/rbf.py | import numpy as np
from scipy.spatial.distance import cdist
from kernel import Kernel
"""
Implementation of isotropic RBF/SE kernel
"""
class RBF(Kernel):
def __init__(self, lengthscale=1, variance=1, noise=1):
super(RBF, self).__init__("RBF")
self.lengthscale = lengthscale
self.variance = variance
self.jitter = 1e-9
self.noise = noise / self.variance + self.jitter# dividing by variance for new strategy
def K(self, X1, X2):
""" GP squared exponential kernel """
pairwise_dists = cdist(X1, X2, 'euclidean')
return self.variance*np.exp(-0.5 * (pairwise_dists ** 2) / self.lengthscale ** 2)
def K_scalar(self, X1, X2, original_dimensions):
pairwise_dists = cdist(X1, X2, 'euclidean')
return (self.variance**(float(1)/original_dimensions)) * np.exp(-0.5 * pairwise_dists ** 2 / self.lengthscale ** 2)
| 898 | 33.576923 | 123 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/kernels/matern32.py | import numpy as np
from scipy.spatial.distance import cdist
from kernel import Kernel
"""
Implementation of isotropic Matern-3/2 kernel
"""
class Matern32(Kernel):
def __init__(self, lengthscale=1, variance=1, noise=1):
super(Matern32, self).__init__("Matern 3/2")
self.lengthscale = lengthscale
self.variance = variance
self.noise = noise # adding jitter for numerical stability
def K(self, X1, X2):
""" GP matern-3/2 kernel """
pairwise_dists = cdist(X1, X2, 'euclidean')/self.lengthscale
return self.variance * (1. + np.sqrt(3.) * pairwise_dists) * np.exp(-np.sqrt(3.) * pairwise_dists)
def K_scalar(self, X1, X2, original_dimensions):
raise NotImplementedError | 745 | 31.434783 | 106 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/kernels/__init__.py | from kernel import Kernel
from rbf import RBF
from matern32 import Matern32 | 75 | 24.333333 | 29 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/build/lib/PcgComp/kernels/kernel.py | """
Superclass for classes of Kernel functions.
"""
class Kernel(object):
def __init__(self, name = ""):
self.name = name
"""
Computation of Kernel matrix for the given inputs - Noise excluded
"""
def K(self, X1, X2):
raise NotImplementedError
"""
Computation of scalar Kernel matrix - for grid inputs
"""
def K_scalar(self, X1, X2, original_dimensions):
raise NotImplementedError | 399 | 19 | 67 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/Comparison Results/PowerPlant/plot.py | import sys
import numpy as np
import random as ran
import matplotlib as m
import matplotlib
# import PcgComp
import random as ran
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def show_values(pc, fmt="%s", **kw):
from itertools import izip
pc.update_scalarmappable()
ax = pc.get_axes()
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
if value < 0:
value = r'$-$'
elif value > 0:
value = r'$+$'
else:
value = r'$\circ$'
ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
pp = PdfPages('PowerPlant Data - Precon Results.pdf')
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
names = ['PowerPlant Data - Cg Iterations (log10)']
files = ['iterations.txt']
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=38)
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([-3, -2, -1, 0, 1, 2])
column_labels = np.array([-2, -4, -6])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='YlOrRd', edgecolor='black', linestyle=':', lw=1,vmin=0,vmax=5)
# show_values(im)
#fig.colorbar(im)
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels, label='Lengthscale')
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
ax.set_aspect('equal')
ax.tick_params(axis='x', pad=5)
ax.tick_params(axis='y', pad=3)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=46)
plt.xlabel(r'$\log_{10}(l)$')
plt.ylabel(r'$\log_{10}(\lambda)$')
#plt.title(names[d])
pp.savefig(bbox_inches='tight')
# names = ['Block Jacobi','PITC', 'FITC','Nystrom', 'Spectral', 'Randomized SVD', 'Regularized', 'KISS Interpolation']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt','ConcReg.txt','ConcKron.txt']
names = ['Block Jacobi','PITC', 'FITC','Nystrom', 'Spectral', 'Randomized SVD', 'Regularized', 'SKI']
files = ['PowBlock.txt', 'PowPitc.txt', 'PowFitc.txt','PowNyst.txt','PowSpec.txt', 'PowSvd.txt', 'PowReg.txt', 'PowKron.txt']
# names = ['PowerPlant - Block Precon','PowerPlant - Pitc Precon', 'PowerPlant - Fitc Precon', 'PowerPlant - Nyst Precon', 'PowerPlant - Spec Precon', 'PowerPant - SVD Precon']
# files = ['PowBlock.txt','PowPitc.txt', 'PowFitc.txt', 'PowNyst.txt','PowSpec.txt', 'PowSvd.txt']
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([-3, -2, -1, 0, 1, 2])
column_labels = np.array([-2, -4, -6])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='bwr', edgecolor='black', linestyle=':', lw=1,vmin=-2,vmax=2)
show_values(im)
# fig.colorbar(im)
ax.set_aspect('equal')
# if (d==0 or d==2 or d==4):
# ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
# ax.xaxis.set(ticks=[])
# # plt.ylabel('Noise')
# elif (d==1 or d==3 or d==5):
# ax.yaxis.set(ticks=[])
# ax.xaxis.set(ticks=[])
# elif (d==6):
# ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
# ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
# # plt.xlabel('Lengthscale')
# # plt.ylabel('Noise')
# else:
# ax.yaxis.set(ticks=[])
# ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
# # plt.xlabel('Lengthscale')
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
ax.tick_params(axis='x', pad=40)
ax.tick_params(axis='y', pad=20)
# plt.xlabel('Lengthscale')
# plt.ylabel('Noise')
plt.title(names[d],y=1.08)
pp.savefig(bbox_inches='tight')
pp.close()
| 4,739 | 34.111111 | 176 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/Comparison Results/Concrete/plot.py | import sys
import numpy as np
import random as ran
import matplotlib as m
import matplotlib
# import PcgComp
import random as ran
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def show_values(pc, fmt="%s", **kw):
from itertools import izip
pc.update_scalarmappable()
ax = pc.get_axes()
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
if value < 0:
value = r'$-$'
elif value > 0:
value = r'$+$'
else:
value = r'$\circ$'
ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
pp = PdfPages('Concrete Data - Precon Results.pdf')
names = ['Concrete Data - Cg Iterations (log10)']
files = ['iterations.txt']
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=38)
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([-3, -2, -1, 0, 1, 2])
column_labels = np.array([-2, -4, -6])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='YlOrRd', edgecolor='black', linestyle=':', lw=1,vmin=0,vmax=5)
#show_values(im)
#fig.colorbar(im)
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels, label='Lengthscale')
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
ax.set_aspect('equal')
ax.tick_params(axis='x', pad=5)
ax.tick_params(axis='y', pad=3)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=46)
plt.xlabel(r'$\log_{10}(l)$')
plt.ylabel(r'$\log_{10}(\lambda)$')
#plt.title(names[d])
pp.savefig(bbox_inches='tight')
names = ['Block Jacobi','PITC', 'FITC','Nystrom', 'Spectral', 'Randomized SVD', 'Regularized', 'SKI']
files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt','ConcReg.txt','ConcKron.txt']
# names = ['PowerPlant - Block Precon','PowerPlant - Pitc Precon', 'PowerPlant - Fitc Precon', 'PowerPlant - Nyst Precon', 'PowerPlant - Spec Precon', 'PowerPant - SVD Precon']
# files = ['PowBlock.txt','PowPitc.txt', 'PowFitc.txt', 'PowNyst.txt','PowSpec.txt', 'PowSvd.txt']
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([-3, -2, -1, 0, 1, 2])
column_labels = np.array([-2, -4, -6])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='bwr', edgecolor='black', linestyle=':', lw=1,vmin=-2,vmax=2)
show_values(im)
# fig.colorbar(im)
ax.set_aspect('equal')
# if (d==0 or d==2 or d==4):
# ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
# ax.xaxis.set(ticks=[])
# # plt.ylabel('Noise')
# elif (d==1 or d==3 or d==5):
# ax.yaxis.set(ticks=[])
# ax.xaxis.set(ticks=[])
# elif (d==6):
# ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
# ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
# # plt.xlabel('Lengthscale')
# # plt.ylabel('Noise')
# else:
# ax.yaxis.set(ticks=[])
# ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
# # plt.xlabel('Lengthscale')
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
ax.tick_params(axis='x', pad=40)
ax.tick_params(axis='y', pad=20)
# plt.xlabel('Lengthscale')
# plt.ylabel('Noise')
plt.title(names[d],y=1.08)
pp.savefig(bbox_inches='tight')
pp.close()
| 3,951 | 30.870968 | 176 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/Comparison Results/Protein/plot2.py | import sys
import numpy as np
import random as ran
import matplotlib as m
import matplotlib
import PcgComp
import random as ran
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def show_values(pc, fmt="%.2f", **kw):
from itertools import izip
pc.update_scalarmappable()
ax = pc.get_axes()
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
color = (0.0, 0.0, 0.0)
ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
pp = PdfPages('Protein - Precon Results.pdf')
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
names = ['Protein - Cg Iterations (log10)']
files = ['iterations.txt']
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([1e-3, 1e-2, 1e-1, 1, 10, 100])
column_labels = np.array([1e-1, 1e-2, 1e-3])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='YlOrRd', edgecolor='black', linestyle=':', lw=1,vmin=0,vmax=5)
show_values(im)
fig.colorbar(im)
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels, label='Lengthscale')
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
plt.xlabel('Lengthscale')
plt.ylabel('Noise')
plt.title(names[d])
pp.savefig()
names = ['Protein - Block Precon','Protein - Pitc Precon', 'Protein - Fitc Precon', 'Protein - Nyst Precon', 'Protein - Spec Precon', 'Protein - SVD Precon']
files = ['ProtBlock.txt','ProtPitc.txt', 'ProtFitc.txt', 'ProtNyst.txt','ProtSpec.txt', 'ProtSvd.txt']
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([1e-3, 1e-2, 1e-1, 1, 10, 100])
column_labels = np.array([1e-1, 1e-2, 1e-3])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='bwr', edgecolor='black', linestyle=':', lw=1,vmin=-2,vmax=2)
show_values(im)
fig.colorbar(im)
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels, label='Lengthscale')
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
plt.xlabel('Lengthscale')
plt.ylabel('Noise')
plt.title(names[d])
pp.savefig()
pp.close() | 3,136 | 32.37234 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/Comparison Results/Protein/plot.py | import sys
import numpy as np
import random as ran
import matplotlib as m
import matplotlib
# import PcgComp
import random as ran
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def show_values(pc, fmt="%s", **kw):
from itertools import izip
pc.update_scalarmappable()
ax = pc.get_axes()
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
if value < 0:
value = r'$-$'
elif value > 0:
value = r'$+$'
else:
value = r'$\circ$'
ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
pp = PdfPages('Protein Data - Precon Results.pdf')
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
names = ['Protein Data - Cg Iterations (log10)']
files = ['iterations.txt']
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=38)
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([-3, -2, -1, 0, 1, 2])
column_labels = np.array([-2, -4, -6])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='YlOrRd', edgecolor='black', linestyle=':', lw=1,vmin=0,vmax=5)
#show_values(im)
#fig.colorbar(im)
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels, label='Lengthscale')
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
ax.set_aspect('equal')
ax.tick_params(axis='x', pad=5)
ax.tick_params(axis='y', pad=3)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=46)
plt.xlabel(r'$\log_{10}(l)$')
plt.ylabel(r'$\log_{10}(\lambda)$')
#plt.title(names[d])
pp.savefig(bbox_inches='tight')
# names = ['Block Jacobi','PITC', 'FITC','Nystrom', 'Spectral', 'Randomized SVD', 'Regularized', 'KISS Interpolation']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt','ConcReg.txt','ConcKron.txt']
names = ['Block Jacobi','PITC', 'FITC', 'Nystrom', 'Spectral', 'Randomized SVD']
files = ['ProtBlock.txt','ProtPitc.txt', 'ProtFitc.txt', 'ProtNyst.txt','ProtSpec.txt', 'ProtSvd.txt']
# names = ['PowerPlant - Block Precon','PowerPlant - Pitc Precon', 'PowerPlant - Fitc Precon', 'PowerPlant - Nyst Precon', 'PowerPlant - Spec Precon', 'PowerPant - SVD Precon']
# files = ['PowBlock.txt','PowPitc.txt', 'PowFitc.txt', 'PowNyst.txt','PowSpec.txt', 'PowSvd.txt']
for d in xrange(len(files)):
data = np.loadtxt(files[d],delimiter=',')
# for i in xrange(np.shape(data)[0]):
# for j in xrange(np.shape(data)[1]):
# if (data[i][j]<1 and data[i][j]!=0):
# data[i][j] = -1/data[i][j]
row_labels = np.array([-3, -2, -1, 0, 1, 2])
column_labels = np.array([-2, -4, -6])
fig, ax = plt.subplots()
im = ax.pcolor(data, cmap='bwr', edgecolor='black', linestyle=':', lw=1,vmin=-2,vmax=2)
show_values(im)
# fig.colorbar(im)
ax.set_aspect('equal')
# if (d==0 or d==2 or d==4):
# ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
# ax.xaxis.set(ticks=[])
# # plt.ylabel('Noise')
# elif (d==1 or d==3 or d==5):
# ax.yaxis.set(ticks=[])
# ax.xaxis.set(ticks=[])
# elif (d==6):
# ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
# ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
# # plt.xlabel('Lengthscale')
# # plt.ylabel('Noise')
# else:
# ax.yaxis.set(ticks=[])
# ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
# # plt.xlabel('Lengthscale')
ax.yaxis.set(ticks=np.arange(0.5, len(column_labels)), ticklabels=column_labels)
ax.xaxis.set(ticks=np.arange(0.5, len(row_labels)), ticklabels=row_labels)
ax.tick_params(axis='x', pad=40)
ax.tick_params(axis='y', pad=20)
# plt.xlabel('Lengthscale')
# plt.ylabel('Noise')
plt.title(names[d],y=1.08)
pp.savefig(bbox_inches='tight')
pp.close()
| 4,695 | 34.044776 | 176 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ProtResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_PROTEIN_FOLD_1.txt','FIC_PROTEIN_FOLD_2.txt', 'FIC_PROTEIN_FOLD_3.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,405 | 31.697674 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ProtResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_PROTEIN_FOLD_1.txt','PIC_PROTEIN_FOLD_2.txt', 'PIC_PROTEIN_FOLD_3.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,405 | 31.697674 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ProtResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Protein - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Protein')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Protein')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,503 | 33.181818 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ProtResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_PROTEIN_FOLD_1.txt','VAR_PROTEIN_FOLD_2.txt', 'VAR_PROTEIN_FOLD_3.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,405 | 31.697674 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ConcResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_CONCRETE_FOLD_1.txt','FIC_CONCRETE_FOLD_2.txt', 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ConcResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_CONCRETE_FOLD_1.txt','PIC_CONCRETE_FOLD_2.txt', 'PIC_CONCRETE_FOLD_3.txt', 'PIC_CONCRETE_FOLD_4.txt','PIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ConcResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Concrete - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,506 | 33.25 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/ConcResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_CONCRETE_FOLD_1.txt','VAR_CONCRETE_FOLD_2.txt', 'VAR_CONCRETE_FOLD_3.txt', 'VAR_CONCRETE_FOLD_4.txt','VAR_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/PowResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_POWER_FOLD_1.txt','FIC_POWER_FOLD_2.txt', 'FIC_POWER_FOLD_3.txt', 'FIC_POWER_FOLD_4.txt','FIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/PowResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_POWER_FOLD_1.txt','PIC_POWER_FOLD_2.txt', 'PIC_POWER_FOLD_3.txt', 'PIC_POWER_FOLD_4.txt','PIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/PowResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('PowerPlant - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,514 | 33.431818 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - N^(3:2)/PowResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_POWER_FOLD_1.txt','VAR_POWER_FOLD_2.txt', 'VAR_POWER_FOLD_3.txt', 'VAR_POWER_FOLD_4.txt','VAR_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ProtResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_PROTEIN_FOLD_1.txt','FIC_PROTEIN_FOLD_2.txt', 'FIC_PROTEIN_FOLD_3.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,405 | 31.697674 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ProtResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_PROTEIN_FOLD_1.txt','PIC_PROTEIN_FOLD_2.txt', 'PIC_PROTEIN_FOLD_3.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,405 | 31.697674 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ProtResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Protein - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Protein')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Protein')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,493 | 33.744186 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ProtResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_PROTEIN_FOLD_1.txt','VAR_PROTEIN_FOLD_2.txt', 'VAR_PROTEIN_FOLD_3.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,405 | 31.697674 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ConcResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_CONCRETE_FOLD_1.txt','FIC_CONCRETE_FOLD_2.txt', 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ConcResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_CONCRETE_FOLD_1.txt','PIC_CONCRETE_FOLD_2.txt', 'PIC_CONCRETE_FOLD_3.txt', 'PIC_CONCRETE_FOLD_4.txt','PIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ConcResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Concrete - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,496 | 33.813953 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/ConcResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_CONCRETE_FOLD_1.txt','VAR_CONCRETE_FOLD_2.txt', 'VAR_CONCRETE_FOLD_3.txt', 'VAR_CONCRETE_FOLD_4.txt','VAR_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/PowResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_POWER_FOLD_1.txt','FIC_POWER_FOLD_2.txt', 'FIC_POWER_FOLD_3.txt', 'FIC_POWER_FOLD_4.txt','FIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/PowResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_POWER_FOLD_1.txt','PIC_POWER_FOLD_2.txt', 'PIC_POWER_FOLD_3.txt', 'PIC_POWER_FOLD_4.txt','PIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/PowResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('PowerPlant - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,504 | 34 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF_OLD/RBF - SqrtN/PowResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_POWER_FOLD_1.txt','VAR_POWER_FOLD_2.txt', 'VAR_POWER_FOLD_3.txt', 'VAR_POWER_FOLD_4.txt','VAR_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/ConcResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_CONCRETE_FOLD_1.txt','FIC_CONCRETE_FOLD_2.txt', 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/ConcResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_CONCRETE_FOLD_1.txt','PIC_CONCRETE_FOLD_2.txt', 'PIC_CONCRETE_FOLD_3.txt', 'PIC_CONCRETE_FOLD_4.txt','PIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/ConcResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Concrete - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,506 | 33.25 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/ConcResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_CONCRETE_FOLD_1.txt','VAR_CONCRETE_FOLD_2.txt', 'VAR_CONCRETE_FOLD_3.txt', 'VAR_CONCRETE_FOLD_4.txt','VAR_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,461 | 33 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/PowResultsGps/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_POWER_FOLD_1.txt','FIC_POWER_FOLD_2.txt', 'FIC_POWER_FOLD_3.txt', 'FIC_POWER_FOLD_4.txt','FIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/PowResultsGps/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_POWER_FOLD_1.txt','PIC_POWER_FOLD_2.txt', 'PIC_POWER_FOLD_3.txt', 'PIC_POWER_FOLD_4.txt','PIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/PowResultsGps/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('PowerPlant - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,514 | 33.431818 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD_OLD/PowResultsGps/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_POWER_FOLD_1.txt','VAR_POWER_FOLD_2.txt', 'VAR_POWER_FOLD_3.txt', 'VAR_POWER_FOLD_4.txt','VAR_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 100 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,446 | 32.651163 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_POWER/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_POWER_FOLD_1.txt','FIC_POWER_FOLD_2.txt', 'FIC_POWER_FOLD_3.txt', 'FIC_POWER_FOLD_4.txt','FIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,445 | 32.627907 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_POWER/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_POWER_FOLD_1.txt','PIC_POWER_FOLD_2.txt', 'PIC_POWER_FOLD_3.txt', 'PIC_POWER_FOLD_4.txt','PIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,445 | 32.627907 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_POWER/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('PowerPlant - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Power Plant')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,514 | 33.431818 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_POWER/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_POWER_FOLD_1.txt','VAR_POWER_FOLD_2.txt', 'VAR_POWER_FOLD_3.txt', 'VAR_POWER_FOLD_4.txt','VAR_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,445 | 32.627907 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_CONC/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_CONCRETE_FOLD_1.txt','FIC_CONCRETE_FOLD_2.txt', 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,460 | 32.976744 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_CONC/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_CONCRETE_FOLD_1.txt','PIC_CONCRETE_FOLD_2.txt', 'PIC_CONCRETE_FOLD_3.txt', 'PIC_CONCRETE_FOLD_4.txt','PIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,460 | 32.976744 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_CONC/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Concrete - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,506 | 33.25 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_CONC/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_CONCRETE_FOLD_1.txt','VAR_CONCRETE_FOLD_2.txt', 'VAR_CONCRETE_FOLD_3.txt', 'VAR_CONCRETE_FOLD_4.txt','VAR_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,460 | 32.976744 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_PROTEIN/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_PROTEIN_FOLD_1.txt','FIC_PROTEIN_FOLD_2.txt']#, 'FIC_PROTEIN_FOLD_3.txt' ]
data = np.zeros(2, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,407 | 31.744186 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_PROTEIN/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_PROTEIN_FOLD_1.txt','PIC_PROTEIN_FOLD_2.txt']#, 'PIC_PROTEIN_FOLD_3.txt' ]
data = np.zeros(3, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,407 | 31.744186 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_PROTEIN/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Protein - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(3, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Protein')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(3, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Protein')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,503 | 33.181818 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/ARD/ARD_RESULTS_PROTEIN/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_PROTEIN_FOLD_1.txt','VAR_PROTEIN_FOLD_2.txt']#, 'VAR_PROTEIN_FOLD_3.txt' ]
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,407 | 31.744186 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_CREDIT/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_CLASS_CREDIT_FOLD_1.txt','FIC_CLASS_CREDIT_FOLD_2.txt', 'FIC_CLASS_CREDIT_FOLD_3.txt', 'FIC_CLASS_CREDIT_FOLD_4.txt','FIC_CLASS_CREDIT_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,480 | 33.44186 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_CREDIT/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_CLASS_CREDIT_FOLD_1.txt','PIC_CLASS_CREDIT_FOLD_2.txt', 'PIC_CLASS_CREDIT_FOLD_3.txt', 'PIC_CLASS_CREDIT_FOLD_4.txt','PIC_CLASS_CREDIT_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,480 | 33.44186 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_CREDIT/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
pp = PdfPages('Credit - GpStuff Results.pdf')
files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Credit')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()
files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
names = ['FIC','VAR','PIC']
data = np.zeros(5, dtype=object)
for d in xrange(len(files)):
data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2))
plt.plot(data[:,0], data[:,1], label=names[d])
plt.legend(loc='upper left')
plt.title('Credit')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,500 | 33.113636 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_CREDIT/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_CLASS_CREDIT_FOLD_1.txt','VAR_CLASS_CREDIT_FOLD_2.txt', 'VAR_CLASS_CREDIT_FOLD_3.txt', 'VAR_CLASS_CREDIT_FOLD_4.txt','VAR_CLASS_CREDIT_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,480 | 33.44186 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_SPAM/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_CLASS_SPAM_FOLD_1.txt','FIC_CLASS_SPAM_FOLD_2.txt','FIC_CLASS_SPAM_FOLD_3.txt']#, 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(3, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,494 | 33.767442 | 175 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_SPAM/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_CLASS_SPAM_FOLD_1.txt','PIC_CLASS_SPAM_FOLD_2.txt','PIC_CLASS_SPAM_FOLD_3.txt']#, 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(3, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,494 | 33.767442 | 175 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_SPAM/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt


def _plot_metric(pdf, result_files, names, title, ylabel):
    """Plot one curve per result file into *pdf* as a single page.

    Each file holds whitespace-separated rows whose columns 1-2 are
    (time, metric value); one labelled line is drawn per method.
    """
    for fname, label in zip(result_files, names):
        curve = np.loadtxt(fname, delimiter=' ', usecols=(1, 2))
        plt.plot(curve[:, 0], curve[:, 1], label=label)
    plt.legend(loc='upper left')
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel("Time taken (seconds - log)")
    pdf.savefig()
    plt.clf()


pp = PdfPages('Spam - GpStuff Results.pdf')
# Page 1: negative marginal log likelihood vs time.
_plot_metric(pp, ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt'],
             ['FIC','VAR','PIC'], 'Spam', 'Negative Marginal Log Likelihood')
# Page 2: MSE vs time. Title fixed from 'Concrete' (copy-paste slip from the
# Concrete variant of this script) to 'Spam' to match the PDF's dataset.
_plot_metric(pp, ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt'],
             ['FIC','VAR','PIC'], 'Spam', 'Mean Squared Error')
pp.close() | 1,498 | 33.068182 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_SPAM/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_CLASS_SPAM_FOLD_1.txt','VAR_CLASS_SPAM_FOLD_2.txt', 'VAR_CLASS_SPAM_FOLD_3.txt']#, 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(3, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,495 | 33.790698 | 176 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_CONCRETE/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_CONCRETE_FOLD_1.txt','FIC_CONCRETE_FOLD_2.txt', 'FIC_CONCRETE_FOLD_3.txt', 'FIC_CONCRETE_FOLD_4.txt','FIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,460 | 32.976744 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_CONCRETE/averagePicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['PIC_CONCRETE_FOLD_1.txt','PIC_CONCRETE_FOLD_2.txt', 'PIC_CONCRETE_FOLD_3.txt', 'PIC_CONCRETE_FOLD_4.txt','PIC_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,460 | 32.976744 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_CONCRETE/plots.py | import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt

# Two-page PDF of GpStuff results on the Concrete dataset:
# page 1 = negative marginal log likelihood vs time, page 2 = MSE vs time.
pp = PdfPages('Concrete - GpStuff Results.pdf')

method_names = ['FIC','VAR','PIC']

# Page 1: NMLL curves, one labelled line per approximation method.
nmll_files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt']
for fname, label in zip(nmll_files, method_names):
    curve = np.loadtxt(fname, delimiter=' ', usecols=(1, 2))
    plt.plot(curve[:, 0], curve[:, 1], label=label)
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Negative Marginal Log Likelihood')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
plt.clf()

# Page 2: MSE curves for the same methods.
mse_files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt']
for fname, label in zip(mse_files, method_names):
    curve = np.loadtxt(fname, delimiter=' ', usecols=(1, 2))
    plt.plot(curve[:, 0], curve[:, 1], label=label)
plt.legend(loc='upper left')
plt.title('Concrete')
plt.ylabel('Mean Squared Error')
plt.xlabel("Time taken (seconds - log)")
pp.savefig()
pp.close() | 1,506 | 33.25 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_CONCRETE/averageVarNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['VAR_CONCRETE_FOLD_1.txt','VAR_CONCRETE_FOLD_2.txt', 'VAR_CONCRETE_FOLD_3.txt', 'VAR_CONCRETE_FOLD_4.txt','VAR_CONCRETE_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,460 | 32.976744 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_POWER/averageFicNML.py | import numpy as np
# names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD']
# files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt']
# names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon']
# files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt']
files = ['FIC_POWER_FOLD_1.txt','FIC_POWER_FOLD_2.txt', 'FIC_POWER_FOLD_3.txt', 'FIC_POWER_FOLD_4.txt','FIC_POWER_FOLD_5.txt']
data = np.zeros(5, dtype=object)
iterations = 5
for d in xrange(len(files)):
data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4))
lastT = np.zeros(len(files))
lastE = np.zeros(len(files))
count = 0
while(True):
occur = 0
totalT = 0
totalE = 0
iterations = 0
count = count + 1
for d in xrange(len(files)):
fold = data[d]
if len(fold) >= count:
occur = occur+1
iterations = fold[count-1][0]
totalT = totalT + fold[count-1][1]
totalE = totalE + fold[count-1][2]
lastT[d] = fold[count-1][1]
lastE[d] = fold[count-1][2]
else:
totalE = totalE + lastE[d]
if (occur == 0):
break
avgT = totalT / occur
avgE = totalE / len(files)
if (count < 10 or count % 10 == 0):
print '%s %s %s' % (iterations, np.log10(avgT), avgE)
| 1,445 | 32.627907 | 168 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.