Mirror of https://github.com/SheffieldML/GPy.git
Adding some test cases for EP. More work needs to be done; these are some high-level test cases.
This commit is contained in:
parent 255d97a917
commit 54f530a22b
2 changed files with 196 additions and 0 deletions
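As a side note for reviewers: the new module is self-contained and runnable on its own (it ends in unittest.main()). A minimal sketch for driving it programmatically, assuming GPy, pods, and nose are importable in the checkout (nothing here is prescribed by the commit itself):

# Load and run the new EP test module; equivalent to executing the file directly,
# which triggers unittest.main() under __main__.
import unittest
from GPy.testing import ep_likelihood_tests

suite = unittest.TestLoader().loadTestsFromModule(ep_likelihood_tests)
unittest.TextTestRunner(verbosity=2).run(suite)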
151	GPy/testing/ep_likelihood_tests.py	Normal file
@@ -0,0 +1,151 @@
import numpy as np
import unittest
import GPy
from GPy.models import GradientChecker
from nose.tools import with_setup, nottest

import pods

fixed_seed = 10

# this file contains some high-level tests; this is not unit testing, but it gives us a
# higher-level indication of whether things are going well under the hood.
class TestObservationModels(unittest.TestCase):
    def setUp(self):
        np.random.seed(fixed_seed)
        self.N = 100
        self.D = 2
        self.X = np.random.rand(self.N, self.D)

        self.real_noise_std = 0.05
        noise = np.random.randn(*self.X[:, 0].shape) * self.real_noise_std
        self.Y = (np.sin(self.X[:, 0] * 2 * np.pi) + noise)[:, None]
        self.num_points = self.X.shape[0]
        self.f = np.random.rand(self.N, 1)
        self.binary_Y = np.asarray(np.random.rand(self.N) > 0.5, dtype=int)[:, None]
        # self.binary_Y[self.binary_Y == 0.0] = -1.0
        self.positive_Y = np.exp(self.Y.copy())

        self.Y_noisy = self.Y.copy()
        self.Y_verynoisy = self.Y.copy()
        self.Y_noisy[75:80] += 1.3

        self.init_var = 0.3
        self.deg_free = 5.
        censored = np.zeros_like(self.Y)
        random_inds = np.random.choice(self.N, int(self.N / 2), replace=True)
        censored[random_inds] = 1
        self.Y_metadata = dict()
        self.Y_metadata['censored'] = censored
        self.kernel1 = GPy.kern.RBF(self.X.shape[1]) + GPy.kern.White(self.X.shape[1])

    def tearDown(self):
        self.Y = None
        self.X = None
        self.binary_Y = None
        self.positive_Y = None
        self.kernel1 = None

    @with_setup(setUp, tearDown)
    def testEPClassification(self):
        bernoulli = GPy.likelihoods.Bernoulli()
        laplace_inf = GPy.inference.latent_function_inference.Laplace()

        ep_inf_alt = GPy.inference.latent_function_inference.EP(ep_mode='alternated')
        ep_inf_nested = GPy.inference.latent_function_inference.EP(ep_mode='nested')
        ep_inf_fractional = GPy.inference.latent_function_inference.EP(ep_mode='nested', eta=0.9)
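        # A note on the three EP variants above (my reading of these options; worth
        # verifying against the EP implementation): 'alternated' alternates EP runs
        # with hyperparameter updates during optimisation, 'nested' re-runs EP inside
        # every marginal-likelihood evaluation, and eta < 1 selects fractional
        # (power) EP, which raises each site term to the power eta, usually trading
        # a little accuracy for numerical robustness.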

        m1 = GPy.core.GP(self.X, self.binary_Y.copy(), kernel=self.kernel1.copy(), likelihood=bernoulli.copy(), inference_method=laplace_inf)
        # m1['.*white'].constrain_fixed(1e-6)
        # m1['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 1)
        m1.randomize()

        m2 = GPy.core.GP(self.X, self.binary_Y.copy(), kernel=self.kernel1.copy(), likelihood=bernoulli.copy(), inference_method=ep_inf_alt)
        m2.randomize()

        m3 = GPy.core.GP(self.X, self.binary_Y.copy(), kernel=self.kernel1.copy(), likelihood=bernoulli.copy(), inference_method=ep_inf_nested)
        m3.randomize()

        m4 = GPy.core.GP(self.X, self.binary_Y.copy(), kernel=self.kernel1.copy(), likelihood=bernoulli.copy(), inference_method=ep_inf_fractional)
        m4.randomize()

        optimizer = 'bfgs'

        # do gradcheck here ...
        # self.assertTrue(m1.checkgrad())
        # self.assertTrue(m2.checkgrad())
        # self.assertTrue(m3.checkgrad())
        # self.assertTrue(m4.checkgrad())

        m1.optimize(optimizer=optimizer, max_iters=300)
        m2.optimize(optimizer=optimizer, max_iters=300)
        m3.optimize(optimizer=optimizer, max_iters=300)
        m4.optimize(optimizer=optimizer, max_iters=300)

        # taking the Laplace predictions as the ground truth
        probs_mean_lap, probs_var_lap = m1.predict(self.X)
        probs_mean_ep_alt, probs_var_ep_alt = m2.predict(self.X)
        probs_mean_ep_nested, probs_var_ep_nested = m3.predict(self.X)

        # for simple, low-dimensional data the marginal likelihoods of the Laplace and EP
        # approximations should not be far apart
        self.assertAlmostEqual(m1.log_likelihood(), m2.log_likelihood(), delta=1)
        self.assertAlmostEqual(m1.log_likelihood(), m3.log_likelihood(), delta=1)
        self.assertAlmostEqual(m1.log_likelihood(), m4.log_likelihood(), delta=5)

        GPy.util.classification.conf_matrix(probs_mean_lap, self.binary_Y)
        GPy.util.classification.conf_matrix(probs_mean_ep_alt, self.binary_Y)
        GPy.util.classification.conf_matrix(probs_mean_ep_nested, self.binary_Y)

    @nottest
    def rmse(self, Y, Ystar):
        return np.sqrt(np.mean((Y - Ystar) ** 2))

    @with_setup(setUp, tearDown)
    def test_EP_with_StudentT(self):
        studentT = GPy.likelihoods.StudentT(deg_free=self.deg_free, sigma2=self.init_var)
        laplace_inf = GPy.inference.latent_function_inference.Laplace()

        ep_inf_alt = GPy.inference.latent_function_inference.EP(ep_mode='alternated')
        ep_inf_nested = GPy.inference.latent_function_inference.EP(ep_mode='nested')
        ep_inf_frac = GPy.inference.latent_function_inference.EP(ep_mode='nested', eta=0.7)

        # copy the kernel per model, as in the classification test, so the two models
        # do not share parameter objects
        m1 = GPy.core.GP(self.X, self.Y_noisy.copy(), kernel=self.kernel1.copy(), likelihood=studentT.copy(), inference_method=laplace_inf)
        # optimize
        m1['.*white'].constrain_fixed(1e-5)
        m1.randomize()

        m2 = GPy.core.GP(self.X, self.Y_noisy.copy(), kernel=self.kernel1.copy(), likelihood=studentT.copy(), inference_method=ep_inf_alt)
        m2['.*white'].constrain_fixed(1e-5)
        # m2.constrain_bounded('.*t_scale2', 0.001, 10)
        m2.randomize()

        # m3 = GPy.core.GP(self.X, self.Y_noisy.copy(), kernel=self.kernel1.copy(), likelihood=studentT.copy(), inference_method=ep_inf_nested)
        # m3['.*white'].constrain_fixed(1e-5)
        # # m3.constrain_bounded('.*t_scale2', 0.001, 10)
        # m3.randomize()

        optimizer = 'bfgs'
        m1.optimize(optimizer=optimizer, max_iters=400)
        m2.optimize(optimizer=optimizer, max_iters=500)
        # m3.optimize(optimizer=optimizer, max_iters=500)

        # the two approximations should agree only roughly
        self.assertAlmostEqual(m1.log_likelihood(), m2.log_likelihood(), delta=10)
        # self.assertAlmostEqual(m1.log_likelihood(), m3.log_likelihood(), delta=3)

        preds_mean_lap, preds_var_lap = m1.predict(self.X)
        preds_mean_alt, preds_var_alt = m2.predict(self.X)
        # preds_mean_nested, preds_var_nested = m3.predict(self.X)
        rmse_lap = self.rmse(preds_mean_lap, self.Y_noisy)
        rmse_alt = self.rmse(preds_mean_alt, self.Y_noisy)
        # rmse_nested = self.rmse(preds_mean_nested, self.Y_noisy)

        self.assertAlmostEqual(rmse_lap, rmse_alt, delta=1.)

    def test_EP_with_CensoredData(self):
        # placeholder: the censored-data EP test is still to be written
        pass


if __name__ == "__main__":
    unittest.main()
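For readers skimming the diff, the modelling pattern these tests exercise can be distilled into a short standalone sketch; it reuses only constructor and method calls that appear in the test code above, with illustrative data and sizes:

import numpy as np
import GPy

X = np.random.rand(50, 2)
Y = (np.random.rand(50) > 0.5).astype(int)[:, None]  # binary labels in {0, 1}

# GP classifier: Bernoulli likelihood with EP as the approximate inference method
kernel = GPy.kern.RBF(2)
likelihood = GPy.likelihoods.Bernoulli()
ep = GPy.inference.latent_function_inference.EP(ep_mode='alternated')
m = GPy.core.GP(X, Y, kernel=kernel, likelihood=likelihood, inference_method=ep)
m.optimize('bfgs', max_iters=100)
probs, _ = m.predict(X)  # predictive class probabilities at the training inputs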
@@ -61,6 +61,18 @@ class InferenceGPEP(unittest.TestCase):
        Y = lik.samples(f).reshape(-1, 1)
        return X, Y

    def genNoisyData(self):
        np.random.seed(1)
        X = np.random.rand(100, 1)
        self.real_std = 0.2
        noise = np.random.randn(*X[:, 0].shape) * self.real_std
        Y = (np.sin(X[:, 0] * 2 * np.pi) + noise)[:, None]
        self.f = np.random.rand(X.shape[0], 1)
        Y_extra_noisy = Y.copy()
        Y_extra_noisy[50:53] += 4.
        Y_extra_noisy[80:83] -= 2.
        return X, Y, Y_extra_noisy

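    # (the slice assignments above plant a few large outliers; the Student-t test below
    # relies on them to exercise the heavy-tailed likelihood)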
    def test_inference_EP(self):
        from paramz import ObsAr
        X, Y = self.genData()
@@ -86,6 +98,39 @@ class InferenceGPEP(unittest.TestCase):
                                 np.sum(p._woodbury_vector - p0._woodbury_vector),
                                 np.sum(p.woodbury_inv - p0.woodbury_inv)])) < 1e-6)

    # NOTE: this adds a test like the one above for a parameterised likelihood. The test
    # above only covers the probit likelihood, which has no tunable hyperparameters, so
    # the dL_dthetaL term in the dictionary of gradients is always zero. Here we repeat
    # the test for the Student-t likelihood and, eventually, the heteroscedastic Gaussian
    # noise case. The test simply checks that the posterior and the gradients of the log
    # marginal are roughly the same for inference via EP and for exact Gaussian inference
    # using the Gaussian approximations of the individual likelihood site terms. For the
    # probit likelihood the moments can be computed analytically, but other likelihoods
    # need numerical quadrature, so errors could creep in through the quadrature
    # implementation.
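    # In symbols, for reference (standard EP bookkeeping rather than anything read out of
    # the implementation): each likelihood term p(y_i | f_i) gets a scaled Gaussian site
    # t_i(f_i) ~ Z_i * N(f_i; mu_i, s_i^2) with natural parameters tau_i = 1 / s_i^2 and
    # v_i = mu_i / s_i^2, so the pseudo-observations used below are mu_tilde = v / tau
    # with pseudo-noise variance 1 / tau.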
    def test_inference_EP_non_classification(self):
        from paramz import ObsAr
        X, Y, Y_extra_noisy = self.genNoisyData()
        deg_freedom = 5
        init_noise_var = 0.4
        lik_studentT = GPy.likelihoods.StudentT(deg_free=deg_freedom, sigma2=init_noise_var)
        # like_gaussian_noise = GPy.likelihoods.MixedNoise()
        k = GPy.kern.RBF(1, variance=2., lengthscale=1.1)
        ep_inf_alt = GPy.inference.latent_function_inference.expectation_propagation.EP(max_iters=100, delta=0.5)
        ep_inf_nested = GPy.inference.latent_function_inference.expectation_propagation.EP(ep_mode='nested', max_iters=100, delta=0.5)
        m = GPy.core.GP(X=X, Y=Y_extra_noisy, kernel=k, likelihood=lik_studentT, inference_method=ep_inf_alt)
        K = m.kern.K(X)
        post_params, ga_approx, log_Z_tilde = m.inference_method.expectation_propagation(K, ObsAr(Y_extra_noisy), lik_studentT, None)

        mu_tilde = ga_approx.v / ga_approx.tau.astype(float)
        # posterior means mu, mu0 from EP's own inference and from exact Gaussian
        # inference on the pseudo-observations
        p, mu, d = m.inference_method._inference(K, ga_approx, lik_studentT, Y_metadata=None, Z_tilde=log_Z_tilde)
        p0, mu0, d0 = super(GPy.inference.latent_function_inference.expectation_propagation.EP, ep_inf_alt).inference(k, X, lik_studentT, mu_tilde[:, None], mean_function=None, variance=1. / ga_approx.tau, K=K, Z_tilde=log_Z_tilde + np.sum(-0.5 * np.log(ga_approx.tau) + 0.5 * (ga_approx.v * ga_approx.v * 1. / ga_approx.tau)))
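        # Why the extra terms in Z_tilde: a site in natural form satisfies
        # exp(v*f - tau*f**2/2) = N(f; v/tau, 1/tau) * sqrt(2*pi/tau) * exp(v**2/(2*tau)),
        # so, up to the 2*pi constant, each site contributes -0.5*log(tau) + 0.5*v**2/tau
        # when the sites are swapped for normalised Gaussian pseudo-likelihoods (my gloss,
        # not a statement about the GPy internals).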

        # differences between the EP-internal and exact-GP results should be negligible
        assert (np.sum(np.array([np.sum(mu - mu0),
                                 np.sum(d['dL_dK'] - d0['dL_dK']),
                                 np.sum(d['dL_dthetaL'] - d0['dL_dthetaL']),
                                 np.sum(d['dL_dm'] - d0['dL_dm']),
                                 np.sum(p._woodbury_vector - p0._woodbury_vector),
                                 np.sum(p.woodbury_inv - p0.woodbury_inv)])) < 1e-6)


class HMCSamplerTest(unittest.TestCase):