Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-03 16:52:39 +02:00)

Commit dcec9d2a25 (parent a27557e196): "reverted stupid merge error"
8 changed files with 41 additions and 17 deletions
@@ -96,7 +96,8 @@ class GP(GPBase):
         model for a new variable Y* = v_tilde/tau_tilde, with a covariance
         matrix K* = K + diag(1./tau_tilde) plus a normalization term.
         """
-        return - 0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
+        return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -
+                0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)

     def _log_likelihood_gradients(self):
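For reference, the expression being re-wrapped here (the change is purely cosmetic) is the Gaussian log marginal likelihood. Assuming `_model_fit_term()` returns the usual data-fit term (that method is not shown in this hunk), the return value corresponds to

    \log p(Y) = -\frac{ND}{2}\log 2\pi \;-\; \frac{D}{2}\log\lvert K\rvert \;-\; \frac{1}{2}\operatorname{tr}\!\left(K^{-1} Y Y^{\top}\right) \;+\; Z

where N is `self.num_data`, D is `self.output_dim`, and Z is the likelihood's normalization term for the EP-adjusted model described in the docstring.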
@@ -4,7 +4,7 @@
 import numpy as np
 import pylab as pb
 from .. import kern
-from ..util.linalg import linalg, pdinv, mdot, tdot, dpotrs, dtrtrs, jitchol, backsub_both_sides
+from ..util.linalg import pdinv, mdot, tdot, dpotrs, dtrtrs, jitchol, backsub_both_sides
 from ..likelihoods import EP
 from gp_base import GPBase
 from model import Model
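The stray `linalg` name (presumably reintroduced by the bad merge this commit reverts) is dropped from the import list; the remaining names are the linear-algebra helpers that `util.linalg` actually provides.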
@@ -269,6 +269,7 @@ class SVIGP(GPBase):
     def optimize(self, iterations, print_interval=10, callback=lambda:None, callback_interval=5):

         param_step = 0.

         #Iterate!
         for i in range(iterations):
@@ -287,7 +288,6 @@ class SVIGP(GPBase):
            #compute the steps in all parameters
            vb_step = self.vb_steplength*natgrads[0]
            if (self.epochs>=1):#only move the parameters after the first epoch
-               # print "it {} ep {} par {}".format(self.iterations, self.epochs, param_step)
                param_step = self.momentum*param_step + self.param_steplength*grads
            else:
                param_step = 0.
@@ -295,8 +295,8 @@ class SVIGP(GPBase):
            self.set_vb_param(self.get_vb_param() + vb_step)
            #Note: don't recompute everything here, wait until the next iteration when we have a new batch
            self._set_params(self._untransform_params(self._get_params_transformed() + param_step), computations=False)
-           #print messages if desired

+           #print messages if desired
            if i and (not i%print_interval):
                print i, np.mean(self._ll_trace[-print_interval:]) #, self.log_likelihood()
                print np.round(np.mean(self._grad_trace[-print_interval:],0),3)
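The `param_step` logic in these two hunks is a plain momentum update: step <- momentum * step + steplength * grad, then add the step to the parameters. A minimal self-contained sketch of the same rule on a toy objective (the function and names here are illustrative, not GPy's API):

    import numpy as np

    def momentum_ascent(grad_fn, x0, steplength=0.01, momentum=0.9, iterations=500):
        """Gradient ascent with momentum: step <- momentum*step + steplength*grad."""
        x = x0.astype(float)
        step = np.zeros_like(x)
        for _ in range(iterations):
            step = momentum * step + steplength * grad_fn(x)  # same rule as param_step above
            x = x + step  # SVIGP applies its step in a transformed parameter space
        return x

    # Toy objective f(x) = -0.5*||x||^2 with gradient -x; the iterate converges to 0.
    x_opt = momentum_ascent(lambda x: -x, np.array([3.0, -2.0]))

The momentum term smooths out the noise in the stochastic natural gradients computed from each minibatch; note that SVIGP only engages it after the first epoch (`param_step = 0.` otherwise).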
@@ -24,7 +24,7 @@ def BGPLVM(seed=default_seed):
    Y = np.random.multivariate_normal(np.zeros(N), K, Q).T
    lik = Gaussian(Y, normalize=True)

-   k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q) + GPy.kern.white(Q)
+   k = GPy.kern.rbf_inv(Q, ARD=True) + GPy.kern.bias(Q) + GPy.kern.white(Q)
    # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
    # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
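(`rbf_inv` is, presumably, the RBF variant parameterized by inverse lengthscales; the ARD flag and the additive bias/white components carry over unchanged.)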
@@ -67,8 +67,8 @@ def toy_ARD(optim_iters=1000, kernel_type='linear', N=300, D=4):
    X4 = np.log(np.sort(np.random.rand(N,1),0))
    X = np.hstack((X1, X2, X3, X4))

-   Y1 = np.asmatrix(2*X[:,0]+3).T
-   Y2 = np.asmatrix(4*(X[:,2]-1.5*X[:,0])).T
+   Y1 = np.asarray(2*X[:,0]+3).T
+   Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0])).T
    Y = np.hstack((Y1, Y2))

    Y = np.dot(Y, np.random.rand(2,D));
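One thing to watch with this swap: `.T` behaves differently on matrices and 1-D arrays, so `Y1` and `Y2` change shape. A quick check (not part of the commit):

    import numpy as np

    x = np.arange(5.)
    np.asmatrix(x).T.shape   # (5, 1) -- matrices are always 2-D, so .T yields a column
    np.asarray(x).T.shape    # (5,)  -- .T is a no-op on a 1-D ndarray
    np.hstack((x, x)).shape  # (10,) -- hstack of 1-D arrays concatenates rather than making columns

If (N, 1) columns are intended, `np.column_stack((Y1, Y2))` or a trailing `[:, None]` is the usual ndarray idiom.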
@@ -115,8 +115,8 @@ class BayesianGPLVM(SparseGP, GPLVM):
        self.dbound_dZtheta = SparseGP._log_likelihood_gradients(self)
        return np.hstack((self.dbound_dmuS.flatten(), self.dbound_dZtheta))

-   def plot_latent(self, *args, **kwargs):
-       return plot_latent.plot_latent(self, *args, **kwargs)
+   def plot_latent(self, plot_inducing=True, *args, **kwargs):
+       return plot_latent.plot_latent(self, plot_inducing=plot_inducing, *args, **kwargs)

    def do_test_latents(self, Y):
        """
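With this change a caller can toggle the inducing inputs in the latent-space plot, e.g. `m.plot_latent(plot_inducing=False)`; all other arguments still pass straight through to the underlying `plot_latent.plot_latent` helper.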
@@ -36,10 +36,10 @@ class GPLVM(GP):
        self.ensure_default_constraints()

    def initialise_latent(self, init, input_dim, Y):
+       Xr = np.random.randn(Y.shape[0], input_dim)
        if init == 'PCA':
-           return PCA(Y, input_dim)[0]
-       else:
-           return np.random.randn(Y.shape[0], input_dim)
+           Xr[:, :Y.shape[1]] = PCA(Y, input_dim)[0]
+       return Xr

    def getstate(self):
        return GP.getstate(self)
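The net effect of this rework: the latent matrix is always seeded with Gaussian noise, and PCA only overwrites the leading columns, so a latent dimensionality larger than the data dimensionality is handled gracefully. A standalone sketch of the same idea, substituting a plain SVD for GPy's `PCA` helper (illustrative, not the library code):

    import numpy as np

    def initialise_latent_sketch(init, input_dim, Y):
        # Random base: columns not covered by PCA stay Gaussian-distributed.
        Xr = np.random.randn(Y.shape[0], input_dim)
        if init == 'PCA':
            Yc = Y - Y.mean(0)                               # center the data
            U, s, Vt = np.linalg.svd(Yc, full_matrices=False)
            k = min(input_dim, Y.shape[1])
            Xr[:, :k] = U[:, :k] * s[:k]                     # principal-component scores
        return Xr

    X0 = initialise_latent_sketch('PCA', input_dim=5, Y=np.random.randn(50, 3))
    # columns 0-2 hold PCA scores; columns 3-4 remain random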
@@ -23,7 +23,7 @@ class GradientTests(unittest.TestCase):
        self.X2D = np.random.uniform(-3., 3., (40, 2))
        self.Y2D = np.sin(self.X2D[:, 0:1]) * np.sin(self.X2D[:, 1:2]) + np.random.randn(40, 1) * 0.05

-   def check_model_with_white(self, kern, model_type='GPRegression', dimension=1):
+   def check_model_with_white(self, kern, model_type='GPRegression', dimension=1, uncertain_inputs=False):
        # Get the correct gradients
        if dimension == 1:
            X = self.X1D
@@ -36,6 +36,9 @@ class GradientTests(unittest.TestCase):

        noise = GPy.kern.white(dimension)
        kern = kern + noise
+       if uncertain_inputs:
+           m = model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1]))
+       else:
            m = model_fit(X, Y, kernel=kern)
        m.randomize()
        # contrain all parameters to be positive
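For context, `X_variance` is how GPy's sparse models represent input uncertainty: each input is treated as a Gaussian with the given per-dimension variance rather than as a point. A hedged usage sketch in the same shape as the tests (toy data; the gradient check mirrors what `check_model_with_white` verifies):

    import numpy as np
    import GPy

    X = np.random.uniform(-3., 3., (40, 2))
    Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(40, 1) * 0.05
    kern = GPy.kern.rbf(2) + GPy.kern.linear(2) + GPy.kern.white(2)

    # With X_variance, the model integrates over Gaussian inputs, exercising
    # the uncertain-inputs code path that the new tests below cover.
    m = GPy.models.SparseGPRegression(X, Y, kernel=kern,
                                      X_variance=0.1 * np.ones_like(X))
    m.randomize()
    m.checkgrad()  # analytic gradients vs. finite differences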
@@ -141,6 +144,26 @@ class GradientTests(unittest.TestCase):
        rbf = GPy.kern.rbf(2)
        self.check_model_with_white(rbf, model_type='SparseGPRegression', dimension=2)

+   def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
+       ''' Testing the sparse GP regression with rbf and white kernel on 2d data '''
+       rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
+       self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1)
+
+   def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
+       ''' Testing the sparse GP regression with rbf and white kernel on 2d data '''
+       rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
+       self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2)
+
+   def test_SparseGPRegression_rbf_linear_white_kern_2D_uncertain_inputs(self):
+       ''' Testing the sparse GP regression with rbf, linear and white kernel on 2d data with uncertain inputs'''
+       rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
+       self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=1)
+
+   def test_SparseGPRegression_rbf_linear_white_kern_1D_uncertain_inputs(self):
+       ''' Testing the sparse GP regression with rbf, linear and white kernel on 1d data with uncertain inputs'''
+       rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
+       self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1, uncertain_inputs=1)
+
    def test_GPLVM_rbf_bias_white_kern_2D(self):
        """ Testing GPLVM with rbf + bias and white kernel """
        N, input_dim, D = 50, 1, 2