reverted stupid merge error

Max Zwiessele 2013-07-18 15:15:09 +01:00
parent a27557e196
commit dcec9d2a25
8 changed files with 41 additions and 17 deletions


@@ -96,7 +96,8 @@ class GP(GPBase):
model for a new variable Y* = v_tilde/tau_tilde, with a covariance
matrix K* = K + diag(1./tau_tilde) plus a normalization term.
"""
return - 0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -
0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)
def _log_likelihood_gradients(self):
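For context, the return value above is the standard Gaussian log marginal likelihood evaluated for the EP pseudo-observations, with likelihood.Z carrying the extra normalization term mentioned in the docstring. A minimal plain-NumPy sketch of that quantity for a single output column (illustrative only, not GPy's cached implementation):

import numpy as np

def log_marginal_likelihood(K, y):
    # log N(y | 0, K) via a Cholesky factorisation K = L L^T
    N = y.shape[0]
    L = np.linalg.cholesky(K)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))
    K_logdet = 2.0 * np.sum(np.log(np.diag(L)))      # log |K|
    model_fit = -0.5 * float(y.T.dot(alpha))         # -0.5 * y^T K^{-1} y
    return -0.5 * N * np.log(2.0 * np.pi) - 0.5 * K_logdet + model_fit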


@@ -108,7 +108,7 @@ class SparseGP(GPBase):
self.B = np.eye(self.num_inducing) + self.A
self.LB = jitchol(self.B)
#VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
# VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
self.psi1Vf = np.dot(self.psi1.T, self.likelihood.VVT_factor)
# back substitute C into psi1Vf
@@ -163,7 +163,7 @@ class SparseGP(GPBase):
def log_likelihood(self):
""" Compute the (lower bound on the) log marginal likelihood """
if self.likelihood.is_heteroscedastic:
A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V*self.likelihood.Y)
A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
else:
A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
@@ -246,7 +246,7 @@ class SparseGP(GPBase):
Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.num_inducing) - Bi)
if self.Cpsi1V is None:
psi1V = np.dot(self.psi1.T,self.likelihood.V)
psi1V = np.dot(self.psi1.T, self.likelihood.V)
tmp, _ = dtrtrs(self.Lm, np.asfortranarray(psi1V), lower=1, trans=0)
tmp, _ = dpotrs(self.LB, tmp, lower=1)
self.Cpsi1V, _ = dtrtrs(self.Lm, tmp, lower=1, trans=1)
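As a side note, dtrtrs and dpotrs above are GPy's thin LAPACK wrappers (triangular solve and Cholesky solve). A rough standalone sketch of the same back-substitution with SciPy equivalents, assuming Lm = chol(Kmm) and LB = chol(B) as in the surrounding code:

import numpy as np
from scipy.linalg import solve_triangular, cho_solve

def compute_Cpsi1V(Lm, LB, psi1, V):
    # Cpsi1V = Lm^{-T} B^{-1} Lm^{-1} (psi1^T V), computed without explicit inverses
    psi1V = psi1.T.dot(V)
    tmp = solve_triangular(Lm, psi1V, lower=True)              # Lm^{-1} psi1^T V
    tmp = cho_solve((LB, True), tmp)                           # B^{-1} tmp, with B = LB LB^T
    return solve_triangular(Lm, tmp, lower=True, trans='T')    # Lm^{-T} tmp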


@@ -4,7 +4,7 @@
import numpy as np
import pylab as pb
from .. import kern
from ..util.linalg import linalg, pdinv, mdot, tdot, dpotrs, dtrtrs, jitchol, backsub_both_sides
from ..util.linalg import pdinv, mdot, tdot, dpotrs, dtrtrs, jitchol, backsub_both_sides
from ..likelihoods import EP
from gp_base import GPBase
from model import Model
@@ -269,6 +269,7 @@ class SVIGP(GPBase):
def optimize(self, iterations, print_interval=10, callback=lambda:None, callback_interval=5):
param_step = 0.
#Iterate!
for i in range(iterations):
@@ -287,7 +288,6 @@ class SVIGP(GPBase):
#compute the steps in all parameters
vb_step = self.vb_steplength*natgrads[0]
if (self.epochs>=1):#only move the parameters after the first epoch
# print "it {} ep {} par {}".format(self.iterations, self.epochs, param_step)
param_step = self.momentum*param_step + self.param_steplength*grads
else:
param_step = 0.
@@ -295,8 +295,8 @@ class SVIGP(GPBase):
self.set_vb_param(self.get_vb_param() + vb_step)
#Note: don't recompute everything here, wait until the next iteration when we have a new batch
self._set_params(self._untransform_params(self._get_params_transformed() + param_step), computations=False)
#print messages if desired
if i and (not i%print_interval):
print i, np.mean(self._ll_trace[-print_interval:]) #, self.log_likelihood()
print np.round(np.mean(self._grad_trace[-print_interval:],0),3)
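The param_step recursion in this loop is a plain momentum ("heavy ball") update on the transformed parameters. A minimal sketch of that rule in isolation (hypothetical names, not SVIGP's own attributes):

import numpy as np

def momentum_step(prev_step, grad, steplength=1e-3, momentum=0.9):
    # blend the previous step with the new gradient direction
    return momentum * prev_step + steplength * grad

params = np.zeros(3)
step = np.zeros_like(params)
for grad in [np.array([1., 2., 3.])] * 5:
    step = momentum_step(step, grad)
    params = params + step          # gradient-ascent direction, as in the loop above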


@@ -24,7 +24,7 @@ def BGPLVM(seed=default_seed):
Y = np.random.multivariate_normal(np.zeros(N), K, Q).T
lik = Gaussian(Y, normalize=True)
k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q) + GPy.kern.white(Q)
k = GPy.kern.rbf_inv(Q, ARD=True) + GPy.kern.bias(Q) + GPy.kern.white(Q)
# k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
# k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)


@@ -67,8 +67,8 @@ def toy_ARD(optim_iters=1000, kernel_type='linear', N=300, D=4):
X4 = np.log(np.sort(np.random.rand(N,1),0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asmatrix(2*X[:,0]+3).T
Y2 = np.asmatrix(4*(X[:,2]-1.5*X[:,0])).T
Y1 = np.asarray(2*X[:,0]+3).T
Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0])).T
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2,D));
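For reference, transposing a 1-D slice like X[:, 0] behaves differently under np.asarray and np.asmatrix, which is what distinguishes the two lines above; a small plain-NumPy illustration (independent of this commit):

import numpy as np

x = np.arange(5)                  # shape (5,), like X[:, 0]
print(np.asarray(x).T.shape)      # (5,)   -- .T is a no-op on a 1-D array
print(np.asmatrix(x).T.shape)     # (5, 1) -- asmatrix promotes to 2-D first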


@@ -115,8 +115,8 @@ class BayesianGPLVM(SparseGP, GPLVM):
self.dbound_dZtheta = SparseGP._log_likelihood_gradients(self)
return np.hstack((self.dbound_dmuS.flatten(), self.dbound_dZtheta))
def plot_latent(self, *args, **kwargs):
return plot_latent.plot_latent(self, *args, **kwargs)
def plot_latent(self, plot_inducing=True, *args, **kwargs):
return plot_latent.plot_latent(self, plot_inducing=plot_inducing, *args, **kwargs)
def do_test_latents(self, Y):
"""


@@ -36,10 +36,10 @@ class GPLVM(GP):
self.ensure_default_constraints()
def initialise_latent(self, init, input_dim, Y):
Xr = np.random.randn(Y.shape[0], input_dim)
if init == 'PCA':
return PCA(Y, input_dim)[0]
else:
return np.random.randn(Y.shape[0], input_dim)
Xr[:, :Y.shape[1]] = PCA(Y, input_dim)[0]
return Xr
def getstate(self):
return GP.getstate(self)
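For readers unfamiliar with the 'PCA' initialisation used in initialise_latent above, a rough standalone sketch of the idea in plain NumPy (GPy's own PCA helper is what the real code calls; pca_init here is purely illustrative):

import numpy as np

def pca_init(Y, input_dim):
    # latent initialisation: PCA scores in the leading columns, random noise elsewhere
    Yc = Y - Y.mean(axis=0)
    U, S, Vt = np.linalg.svd(Yc, full_matrices=False)
    X = U[:, :input_dim] * S[:input_dim]
    Xr = np.random.randn(Y.shape[0], input_dim)
    Xr[:, :X.shape[1]] = X          # only as many columns as Y can provide
    return Xr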


@@ -23,7 +23,7 @@ class GradientTests(unittest.TestCase):
self.X2D = np.random.uniform(-3., 3., (40, 2))
self.Y2D = np.sin(self.X2D[:, 0:1]) * np.sin(self.X2D[:, 1:2]) + np.random.randn(40, 1) * 0.05
def check_model_with_white(self, kern, model_type='GPRegression', dimension=1):
def check_model_with_white(self, kern, model_type='GPRegression', dimension=1, uncertain_inputs=False):
# Get the correct gradients
if dimension == 1:
X = self.X1D
@@ -36,7 +36,10 @@ class GradientTests(unittest.TestCase):
noise = GPy.kern.white(dimension)
kern = kern + noise
m = model_fit(X, Y, kernel=kern)
if uncertain_inputs:
m = model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1]))
else:
m = model_fit(X, Y, kernel=kern)
m.randomize()
# constrain all parameters to be positive
self.assertTrue(m.checkgrad())
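The new uncertain_inputs branch above simply builds the same sparse model with a per-input variance matrix. A hedged usage sketch, assuming model_fit resolves to GPy.models.SparseGPRegression as the model_type argument suggests:

import numpy as np
import GPy

X = np.random.uniform(-3., 3., (40, 2))
Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(40, 1) * 0.05
kern = GPy.kern.rbf(2) + GPy.kern.linear(2) + GPy.kern.white(2)
m = GPy.models.SparseGPRegression(X, Y, kernel=kern,
                                  X_variance=np.random.rand(*X.shape))
m.randomize()
print(m.checkgrad())                # True if the gradients are consistent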
@@ -141,6 +144,26 @@ class GradientTests(unittest.TestCase):
rbf = GPy.kern.rbf(2)
self.check_model_with_white(rbf, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 1d data '''
rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 2d data '''
rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_linear_white_kern_2D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 2d data with uncertain inputs'''
rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=1)
def test_SparseGPRegression_rbf_linear_white_kern_1D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear and white kernel on 1d data with uncertain inputs'''
rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1, uncertain_inputs=1)
def test_GPLVM_rbf_bias_white_kern_2D(self):
""" Testing GPLVM with rbf + bias and white kernel """
N, input_dim, D = 50, 1, 2