Changed kernels in tests (lots still failing, but now mostly for good reason rather than silly naming problems)

Alan Saul 2014-03-04 11:28:29 +00:00
parent ca40b9803a
commit 9888d768b5
7 changed files with 92 additions and 87 deletions

View file

@@ -30,7 +30,7 @@ def student_t_approx(optimize=True, plot=True):
#Yc = Yc/Yc.max()
#Add student t random noise to datapoints
-deg_free = 5
+deg_free = 1
print "Real noise: ", real_std
initial_var_guess = 0.5
edited_real_sd = initial_var_guess
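For context on the deg_free change above: with one degree of freedom the Student-t noise is Cauchy-distributed, so the corrupted targets pick up far heavier-tailed outliers than with five. A minimal sketch of such a noise draw (the scale and shapes here are assumptions, not taken from the commit):

import numpy as np

real_std = 0.1  # assumed scale, echoing the example's real_std
deg_free = 1    # df=1 is the Cauchy special case
t_noise = real_std * np.random.standard_t(deg_free, size=(30, 1))
print "noise range:", t_noise.min(), t_noise.max()  # occasional huge outliers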
@@ -44,34 +44,39 @@ def student_t_approx(optimize=True, plot=True):
#Gaussian GP model on clean data
m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)
# optimize
-m1['white'].constrain_fixed(1e-5)
+m1['.*white'].constrain_fixed(1e-5)
m1.randomize()
#Gaussian GP model on corrupt data
m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)
-m2['white'].constrain_fixed(1e-5)
+m2['.*white'].constrain_fixed(1e-5)
m2.randomize()
#Student t GP model on clean data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m3 = GPy.core.GP(X, Y.copy(), kernel3, likelihood=t_distribution, inference_method=laplace_inf)
-m3['t_noise'].constrain_bounded(1e-6, 10.)
-m3['white'].constrain_fixed(1e-5)
+m3['.*t_noise'].constrain_bounded(1e-6, 10.)
+m3['.*white'].constrain_fixed(1e-5)
m3.randomize()
debug = True
print m3
if debug:
m3.optimize(messages=1)
return m3
#Student t GP model on corrupt data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m4 = GPy.core.GP(X, Yc.copy(), kernel4, likelihood=t_distribution, inference_method=laplace_inf)
-m4['t_noise'].constrain_bounded(1e-6, 10.)
-m4['white'].constrain_fixed(1e-5)
+m4['.*t_noise'].constrain_bounded(1e-6, 10.)
+m4['.*white'].constrain_fixed(1e-5)
m4.randomize()
print m4
debug=True
if debug:
m4.optimize(messages=1)
import pylab as pb
pb.plot(m4.X, m4.inference_method.f_hat)
pb.plot(m4.X, m4.Y, 'rx')
m4.plot()
print m4
return m4
if optimize:
optimizer='scg'
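Beyond the debug additions, the substantive change in this file is that parameter indexing now goes through regex matching: 'white' becomes '.*white' and 't_noise' becomes '.*t_noise', so each constraint applies to every matching parameter path. A minimal sketch of the whole pattern, assembled only from calls that appear above (the data are assumptions):

import numpy as np
import GPy

X = np.linspace(0, 1, 30)[:, None]
Y = np.sin(4 * X) + 0.05 * np.random.randn(30, 1)

kernel = GPy.kern.RBF(1) + GPy.kern.White(1)
t_distribution = GPy.likelihoods.StudentT(deg_free=1, sigma2=0.5)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m = GPy.core.GP(X, Y, kernel, likelihood=t_distribution, inference_method=laplace_inf)

# the '.*' prefix makes the lookup a regex over the full parameter name
m['.*t_noise'].constrain_bounded(1e-6, 10.)
m['.*white'].constrain_fixed(1e-5)
m.randomize()
m.optimize(messages=1)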

View file

@@ -11,7 +11,7 @@
#http://gaussianprocess.org/gpml/code.
import numpy as np
-from ...util.linalg import mdot, jitchol, dpotrs, dtrtrs, dpotri, symmetrify
+from ...util.linalg import mdot, jitchol, dpotrs, dtrtrs, dpotri, symmetrify, pdinv
from ...util.misc import param_to_array
from posterior import Posterior
import warnings
@@ -149,7 +149,7 @@ class Laplace(object):
#compute vital matrices
C = np.dot(LiW12, K)
-Ki_W_i = K - C.T.dot(C)
+Ki_W_i = K - C.T.dot(C) #Could this be wrong?
#compute the log marginal
log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + likelihood.logpdf(f_hat, Y, extra_data=Y_metadata) - np.sum(np.log(np.diag(L)))
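For reference, the expression on the last line is the standard Laplace-approximate log marginal (stated here as background, not part of this commit): with W the negative Hessian of the log likelihood at the mode \hat{f}, and assuming L above is the Cholesky factor of B = I + W^{1/2} K W^{1/2},

\log Z \simeq -\tfrac{1}{2}\,\hat{f}^{\top} K^{-1} \hat{f} + \log p(\mathbf{y} \mid \hat{f}) - \tfrac{1}{2}\log\lvert B \rvert ,

where the code's -np.sum(np.log(np.diag(L))) term supplies -\tfrac{1}{2}\log\lvert B \rvert via the Cholesky diagonal.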

View file

@@ -17,7 +17,7 @@ class Kernel(Mapping):
:type X: ndarray
:param output_dim: dimension of output.
:type output_dim: int
-:param kernel: a GPy kernel, defaults to GPy.kern.rbf
+:param kernel: a GPy kernel, defaults to GPy.kern.RBF
:type kernel: GPy.kern.kern
"""
@@ -25,7 +25,7 @@ class Kernel(Mapping):
def __init__(self, X, output_dim=1, kernel=None):
Mapping.__init__(self, input_dim=X.shape[1], output_dim=output_dim)
if kernel is None:
-kernel = GPy.kern.rbf(self.input_dim)
+kernel = GPy.kern.RBF(self.input_dim)
self.kern = kernel
self.X = X
self.num_data = X.shape[0]
@@ -43,7 +43,7 @@ class Kernel(Mapping):
def _set_params(self, x):
self.A = x[:self.num_data * self.output_dim].reshape(self.num_data, self.output_dim).copy()
self.bias = x[self.num_data*self.output_dim:].copy()
def randomize(self):
self.A = np.random.randn(self.num_data, self.output_dim)/np.sqrt(self.num_data+1)
self.bias = np.random.randn(self.output_dim)/np.sqrt(self.num_data+1)
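A short usage sketch of the mapping defined above, now defaulting to the renamed GPy.kern.RBF (the module path in the import is an assumption; X is random data):

import numpy as np
import GPy
from GPy.mappings.kernel import Kernel  # assumed location of the class above

X = np.random.rand(20, 3)
mapping = Kernel(X, output_dim=2, kernel=GPy.kern.RBF(3))
mapping.randomize()  # draws A and bias exactly as in randomize() above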

View file

@@ -9,8 +9,8 @@ import numpy as np
from GPy import testing
import sys
import numpy
-from GPy.kern.parts.rbf import RBF
-from GPy.kern.parts.linear import Linear
+from GPy.kern import RBF
+from GPy.kern import Linear
from copy import deepcopy
__test__ = lambda: 'deep' in sys.argv
@@ -36,7 +36,7 @@ class Test(unittest.TestCase):
indices = numpy.cumsum(i_s_dim_list).tolist()
input_slices = [slice(a,b) for a,b in zip([None]+indices, indices)]
#input_slices[2] = deepcopy(input_slices[1])
-input_slice_kern = GPy.kern.kern(9,
+input_slice_kern = GPy.kern.kern(9,
[
RBF(i_s_dim_list[0], np.random.rand(), np.random.rand(i_s_dim_list[0]), ARD=True),
RBF(i_s_dim_list[1], np.random.rand(), np.random.rand(i_s_dim_list[1]), ARD=True),
@@ -51,8 +51,8 @@ class Test(unittest.TestCase):
# GPy.kern.bias(self.input_dim) +
# GPy.kern.white(self.input_dim)),
(#GPy.kern.rbf(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True)
-GPy.kern.linear(self.input_dim, np.random.rand(self.input_dim), ARD=True)
-+GPy.kern.rbf(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True)
+GPy.kern.Linear(self.input_dim, np.random.rand(self.input_dim), ARD=True)
++GPy.kern.RBF(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True)
# +GPy.kern.bias(self.input_dim)
# +GPy.kern.white(self.input_dim)),
),
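The renamed classes compose with + exactly as the lowercase constructors did, keeping the same (input_dim, variance(s), lengthscale, ARD) call signatures. A minimal sketch of the summed kernel this test builds (input_dim and the random hyperparameters are assumptions):

import numpy as np
import GPy

input_dim = 4
k = (GPy.kern.Linear(input_dim, np.random.rand(input_dim), ARD=True)
     + GPy.kern.RBF(input_dim, np.random.rand(), np.random.rand(input_dim), ARD=True))
K = k.K(np.random.rand(10, input_dim))  # 10x10 covariance from the summed kernel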

View file

@@ -25,10 +25,10 @@ class PsiStatModel(Model):
self.kern = kernel
self.psi_ = self.kern.__getattribute__(self.which)(self.Z, self.X, self.X_variance)
self.add_parameters(self.X, self.X_variance, self.Z, self.kern)
def log_likelihood(self):
return self.kern.__getattribute__(self.which)(self.Z, self.X, self.X_variance).sum()
def parameters_changed(self):
psimu, psiS = self.kern.__getattribute__("d" + self.which + "_dmuS")(numpy.ones_like(self.psi_), self.Z, self.X, self.X_variance)
self.X.gradient = psimu
@@ -43,9 +43,9 @@ class PsiStatModel(Model):
if self.which == 'psi0': dL_dpsi0 += 1
if self.which == 'psi1': dL_dpsi1 += 1
if self.which == 'psi2': dL_dpsi2 += 1
-self.kern.update_gradients_variational(numpy.zeros([1,1]),
-dL_dpsi0,
-dL_dpsi1,
+self.kern.update_gradients_variational(numpy.zeros([1,1]),
+dL_dpsi0,
+dL_dpsi1,
dL_dpsi2, self.X, self.X_variance, self.Z)
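As background (standard variational-GP notation, not taken from this commit), the three psi statistics exercised by this model are kernel expectations under the variational input distribution q(X) = \prod_n \mathcal{N}(x_n \mid \mu_n, S_n), with inducing inputs z_m:

\psi_0 = \sum_n \mathbb{E}_{q(x_n)}\!\left[k(x_n, x_n)\right], \qquad
(\Psi_1)_{nm} = \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\right], \qquad
(\Psi_2)_{mm'} = \sum_n \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\, k(x_n, z_{m'})\right]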
class DPsiStatTest(unittest.TestCase):
@@ -57,14 +57,14 @@ class DPsiStatTest(unittest.TestCase):
X_var = .5 * numpy.ones_like(X) + .4 * numpy.clip(numpy.random.randn(*X.shape), 0, 1)
Z = numpy.random.permutation(X)[:num_inducing]
Y = X.dot(numpy.random.randn(input_dim, input_dim))
-# kernels = [GPy.kern.linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)), GPy.kern.rbf(input_dim, ARD=True), GPy.kern.bias(input_dim)]
+# kernels = [GPy.kern.Linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)), GPy.kern.RBF(input_dim, ARD=True), GPy.kern.Bias(input_dim)]
kernels = [
-GPy.kern.linear(input_dim),
-GPy.kern.rbf(input_dim),
-#GPy.kern.bias(input_dim),
-#GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim),
-#GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim)
+GPy.kern.Linear(input_dim),
+GPy.kern.RBF(input_dim),
+#GPy.kern.Bias(input_dim),
+#GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim),
+#GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim)
]
def testPsi0(self):
@@ -73,7 +73,7 @@ class DPsiStatTest(unittest.TestCase):
num_inducing=self.num_inducing, kernel=k)
m.randomize()
assert m.checkgrad(), "{} x psi0".format("+".join(map(lambda x: x.name, k._parameters_)))
def testPsi1(self):
for k in self.kernels:
m = PsiStatModel('psi1', X=self.X, X_variance=self.X_var, Z=self.Z,
@@ -119,11 +119,11 @@ if __name__ == "__main__":
if interactive:
# N, num_inducing, input_dim, input_dim = 30, 5, 4, 30
# X = numpy.random.rand(N, input_dim)
-# k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+# k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
# K = k.K(X)
# Y = numpy.random.multivariate_normal(numpy.zeros(N), K, input_dim).T
# Y -= Y.mean(axis=0)
-# k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+# k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
# m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
# m.randomize()
# # self.assertTrue(m.checkgrad())
@@ -136,11 +136,11 @@ if __name__ == "__main__":
X_var = .5 * numpy.ones_like(X) + .1 * numpy.clip(numpy.random.randn(*X.shape), 0, 1)
Z = numpy.random.permutation(X)[:num_inducing]
Y = X.dot(numpy.random.randn(input_dim, D))
-# kernel = GPy.kern.bias(input_dim)
+# kernel = GPy.kern.Bias(input_dim)
#
-# kernels = [GPy.kern.linear(input_dim), GPy.kern.rbf(input_dim), GPy.kern.bias(input_dim),
-# GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim),
-# GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim)]
+# kernels = [GPy.kern.Linear(input_dim), GPy.kern.RBF(input_dim), GPy.kern.Bias(input_dim),
+# GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim),
+# GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim)]
# for k in kernels:
# m = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
@@ -148,32 +148,32 @@ if __name__ == "__main__":
# assert m.checkgrad(), "{} x psi1".format("+".join(map(lambda x: x.name, k.parts)))
#
m0 = PsiStatModel('psi0', X=X, X_variance=X_var, Z=Z,
-num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim)+GPy.kern.bias(input_dim))
+num_inducing=num_inducing, kernel=GPy.kern.RBF(input_dim)+GPy.kern.Bias(input_dim))
-# m1 = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
-# num_inducing=num_inducing, kernel=kernel)
+# m1 = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
+# num_inducing=num_inducing, kernel=kernel)
# m2 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
-# num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim))
+# num_inducing=num_inducing, kernel=GPy.kern.RBF(input_dim))
# m3 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
-# num_inducing=num_inducing, kernel=GPy.kern.linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)))
-# + GPy.kern.bias(input_dim))
+# num_inducing=num_inducing, kernel=GPy.kern.Linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)))
+# + GPy.kern.Bias(input_dim))
# m = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
-# num_inducing=num_inducing,
+# num_inducing=num_inducing,
# kernel=(
-# GPy.kern.rbf(input_dim, ARD=1)
-# +GPy.kern.linear(input_dim, ARD=1)
-# +GPy.kern.bias(input_dim))
+# GPy.kern.RBF(input_dim, ARD=1)
+# +GPy.kern.Linear(input_dim, ARD=1)
+# +GPy.kern.Bias(input_dim))
# )
# m.ensure_default_constraints()
m2 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
num_inducing=num_inducing, kernel=(
-GPy.kern.rbf(input_dim, numpy.random.rand(), numpy.random.rand(input_dim), ARD=1)
-#+GPy.kern.linear(input_dim, numpy.random.rand(input_dim), ARD=1)
-#+GPy.kern.rbf(input_dim, numpy.random.rand(), numpy.random.rand(input_dim), ARD=1)
-#+GPy.kern.rbf(input_dim, numpy.random.rand(), numpy.random.rand(), ARD=0)
-+GPy.kern.bias(input_dim)
-+GPy.kern.white(input_dim)
+GPy.kern.RBF(input_dim, numpy.random.rand(), numpy.random.rand(input_dim), ARD=1)
+#+GPy.kern.Linear(input_dim, numpy.random.rand(input_dim), ARD=1)
+#+GPy.kern.RBF(input_dim, numpy.random.rand(), numpy.random.rand(input_dim), ARD=1)
+#+GPy.kern.RBF(input_dim, numpy.random.rand(), numpy.random.rand(), ARD=0)
++GPy.kern.Bias(input_dim)
++GPy.kern.White(input_dim)
)
)
m2.ensure_default_constraints()
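A condensed sketch of the gradient-check loop the tests above run; it assumes it executes inside this test module (so PsiStatModel is in scope), and the sizes are assumptions:

import numpy
import GPy

N, num_inducing, input_dim = 30, 5, 2
X = numpy.random.rand(N, input_dim)
X_var = .5 * numpy.ones_like(X)
Z = numpy.random.permutation(X)[:num_inducing]

for which in ['psi0', 'psi1', 'psi2']:
    m = PsiStatModel(which, X=X, X_variance=X_var, Z=Z,
                     num_inducing=num_inducing, kernel=GPy.kern.RBF(input_dim))
    m.randomize()
    assert m.checkgrad(), which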

View file

@@ -10,10 +10,10 @@ class sparse_GPLVMTests(unittest.TestCase):
def test_bias_kern(self):
N, num_inducing, input_dim, D = 10, 3, 2, 4
X = np.random.rand(N, input_dim)
-k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
-k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+k = GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
m.randomize()
self.assertTrue(m.checkgrad())
@@ -21,10 +21,10 @@ class sparse_GPLVMTests(unittest.TestCase):
def test_linear_kern(self):
N, num_inducing, input_dim, D = 10, 3, 2, 4
X = np.random.rand(N, input_dim)
-k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
-k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+k = GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
m.randomize()
self.assertTrue(m.checkgrad())
@@ -32,10 +32,10 @@ class sparse_GPLVMTests(unittest.TestCase):
def test_rbf_kern(self):
N, num_inducing, input_dim, D = 10, 3, 2, 4
X = np.random.rand(N, input_dim)
-k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
-k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
m.randomize()
self.assertTrue(m.checkgrad())
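All three tests follow one pattern: sample Y from a GP prior under one kernel, then gradient-check a SparseGPLVM fitted with another. A condensed sketch (the SparseGPLVM import path is an assumption; sizes match the tests):

import numpy as np
import GPy
from GPy.models import SparseGPLVM  # assumed import, matching the usage above

N, num_inducing, input_dim = 10, 3, 2
X = np.random.rand(N, input_dim)
K = (GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)).K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T

k = GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
m.randomize()
assert m.checkgrad()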

View file

@@ -33,7 +33,7 @@ class GradientTests(unittest.TestCase):
# Get model type (GPRegression, SparseGPRegression, etc)
model_fit = getattr(GPy.models, model_type)
-# noise = GPy.kern.white(dimension)
+# noise = GPy.kern.White(dimension)
kern = kern # + noise
if uncertain_inputs:
m = model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1]))
@@ -45,17 +45,17 @@ class GradientTests(unittest.TestCase):
def test_GPRegression_rbf_1d(self):
''' Testing the GP regression with rbf kernel on 1d data '''
-rbf = GPy.kern.rbf(1)
+rbf = GPy.kern.RBF(1)
self.check_model(rbf, model_type='GPRegression', dimension=1)
def test_GPRegression_rbf_2D(self):
''' Testing the GP regression with rbf kernel on 2d data '''
-rbf = GPy.kern.rbf(2)
+rbf = GPy.kern.RBF(2)
self.check_model(rbf, model_type='GPRegression', dimension=2)
def test_GPRegression_rbf_ARD_2D(self):
''' Testing the GP regression with rbf kernel on 2d data '''
-k = GPy.kern.rbf(2, ARD=True)
+k = GPy.kern.RBF(2, ARD=True)
self.check_model(k, model_type='GPRegression', dimension=2)
def test_GPRegression_mlp_1d(self):
@@ -65,7 +65,7 @@ class GradientTests(unittest.TestCase):
def test_GPRegression_poly_1d(self):
''' Testing the GP regression with polynomial kernel on 1d data '''
-mlp = GPy.kern.poly(1, degree=5)
+mlp = GPy.kern.Poly(1, degree=5)
self.check_model(mlp, model_type='GPRegression', dimension=1)
def test_GPRegression_matern52_1D(self):
@@ -100,80 +100,80 @@ class GradientTests(unittest.TestCase):
def test_GPRegression_exponential_1D(self):
''' Testing the GP regression with exponential kernel on 1d data '''
-exponential = GPy.kern.exponential(1)
+exponential = GPy.kern.Exponential(1)
self.check_model(exponential, model_type='GPRegression', dimension=1)
def test_GPRegression_exponential_2D(self):
''' Testing the GP regression with exponential kernel on 2d data '''
-exponential = GPy.kern.exponential(2)
+exponential = GPy.kern.Exponential(2)
self.check_model(exponential, model_type='GPRegression', dimension=2)
def test_GPRegression_exponential_ARD_2D(self):
''' Testing the GP regression with exponential kernel on 2d data '''
-exponential = GPy.kern.exponential(2, ARD=True)
+exponential = GPy.kern.Exponential(2, ARD=True)
self.check_model(exponential, model_type='GPRegression', dimension=2)
def test_GPRegression_bias_kern_1D(self):
''' Testing the GP regression with bias kernel on 1d data '''
-bias = GPy.kern.bias(1)
+bias = GPy.kern.Bias(1)
self.check_model(bias, model_type='GPRegression', dimension=1)
def test_GPRegression_bias_kern_2D(self):
''' Testing the GP regression with bias kernel on 2d data '''
-bias = GPy.kern.bias(2)
+bias = GPy.kern.Bias(2)
self.check_model(bias, model_type='GPRegression', dimension=2)
def test_GPRegression_linear_kern_1D_ARD(self):
''' Testing the GP regression with linear kernel on 1d data '''
-linear = GPy.kern.linear(1, ARD=True)
+linear = GPy.kern.Linear(1, ARD=True)
self.check_model(linear, model_type='GPRegression', dimension=1)
def test_GPRegression_linear_kern_2D_ARD(self):
''' Testing the GP regression with linear kernel on 2d data '''
-linear = GPy.kern.linear(2, ARD=True)
+linear = GPy.kern.Linear(2, ARD=True)
self.check_model(linear, model_type='GPRegression', dimension=2)
def test_GPRegression_linear_kern_1D(self):
''' Testing the GP regression with linear kernel on 1d data '''
-linear = GPy.kern.linear(1)
+linear = GPy.kern.Linear(1)
self.check_model(linear, model_type='GPRegression', dimension=1)
def test_GPRegression_linear_kern_2D(self):
''' Testing the GP regression with linear kernel on 2d data '''
-linear = GPy.kern.linear(2)
+linear = GPy.kern.Linear(2)
self.check_model(linear, model_type='GPRegression', dimension=2)
def test_SparseGPRegression_rbf_white_kern_1d(self):
''' Testing the sparse GP regression with rbf kernel on 1d data '''
-rbf = GPy.kern.rbf(1)
+rbf = GPy.kern.RBF(1)
self.check_model(rbf, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_white_kern_2D(self):
''' Testing the sparse GP regression with rbf kernel on 2d data '''
-rbf = GPy.kern.rbf(2)
+rbf = GPy.kern.RBF(2)
self.check_model(rbf, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
''' Testing the sparse GP regression with rbf and linear kernels on 1d data '''
-rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
+rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
''' Testing the sparse GP regression with rbf and linear kernels on 2d data '''
-rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
+rbflin = GPy.kern.RBF(2) + GPy.kern.Linear(2)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=2)
#@unittest.expectedFailure
def test_SparseGPRegression_rbf_linear_white_kern_2D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear kernel on 2d data with uncertain inputs'''
-rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
+rbflin = GPy.kern.RBF(2) + GPy.kern.Linear(2)
raise unittest.SkipTest("This is not implemented yet!")
self.check_model(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=1)
#@unittest.expectedFailure
def test_SparseGPRegression_rbf_linear_white_kern_1D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear kernel on 1d data with uncertain inputs'''
-rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
+rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1)
raise unittest.SkipTest("This is not implemented yet!")
self.check_model(rbflin, model_type='SparseGPRegression', dimension=1, uncertain_inputs=1)
@@ -181,7 +181,7 @@ class GradientTests(unittest.TestCase):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
-k = GPy.kern.rbf(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.bias(input_dim, 0.1) + GPy.kern.white(input_dim, 0.05)
+k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.GPLVM(Y, input_dim, kernel=k)
@@ -191,7 +191,7 @@ class GradientTests(unittest.TestCase):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
-k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim, 0.1) + GPy.kern.white(input_dim, 0.05)
+k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k)
@@ -201,7 +201,7 @@ class GradientTests(unittest.TestCase):
N = 20
X = np.hstack([np.random.normal(5, 2, N / 2), np.random.normal(10, 2, N / 2)])[:, None]
Y = np.hstack([np.ones(N / 2), np.zeros(N / 2)])[:, None]
-kernel = GPy.kern.rbf(1)
+kernel = GPy.kern.RBF(1)
m = GPy.models.GPClassification(X,Y,kernel=kernel)
m.update_likelihood_approximation()
self.assertTrue(m.checkgrad())
@@ -211,7 +211,7 @@ class GradientTests(unittest.TestCase):
X = np.hstack([np.random.normal(5, 2, N / 2), np.random.normal(10, 2, N / 2)])[:, None]
Y = np.hstack([np.ones(N / 2), np.zeros(N / 2)])[:, None]
Z = np.linspace(0, 15, 4)[:, None]
-kernel = GPy.kern.rbf(1)
+kernel = GPy.kern.RBF(1)
m = GPy.models.SparseGPClassification(X,Y,kernel=kernel,Z=Z)
#distribution = GPy.likelihoods.likelihood_functions.Bernoulli()
#likelihood = GPy.likelihoods.EP(Y, distribution)
@@ -223,7 +223,7 @@ class GradientTests(unittest.TestCase):
def test_generalized_FITC(self):
N = 20
X = np.hstack([np.random.rand(N / 2) + 1, np.random.rand(N / 2) - 1])[:, None]
-k = GPy.kern.rbf(1) + GPy.kern.white(1)
+k = GPy.kern.RBF(1) + GPy.kern.White(1)
Y = np.hstack([np.ones(N/2),np.zeros(N/2)])[:,None]
m = GPy.models.FITCClassification(X, Y, kernel = k)
m.update_likelihood_approximation()
@@ -237,7 +237,7 @@ class GradientTests(unittest.TestCase):
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
-k1 = GPy.kern.rbf(1)
+k1 = GPy.kern.RBF(1)
m = GPy.models.GPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1])
m.constrain_fixed('.*rbf_var', 1.)
self.assertTrue(m.checkgrad())
@@ -250,7 +250,7 @@ class GradientTests(unittest.TestCase):
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
-k1 = GPy.kern.rbf(1)
+k1 = GPy.kern.RBF(1)
m = GPy.models.SparseGPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1])
m.constrain_fixed('.*rbf_var', 1.)
self.assertTrue(m.checkgrad())
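Across all seven files the pattern is the same mechanical rename: the old lowercase kernel constructors become CamelCase classes with unchanged call signatures. A before/after sketch (the white variance value is an assumption):

import GPy

d = 2
# old names (pre-commit): GPy.kern.rbf(d), GPy.kern.linear(d), GPy.kern.bias(d),
#                         GPy.kern.white(d), GPy.kern.exponential(d), GPy.kern.poly(d, degree=5)
# new names (this commit):
k = GPy.kern.RBF(d) + GPy.kern.Linear(d) + GPy.kern.Bias(d) + GPy.kern.White(d, 0.00001)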