mirror of https://github.com/SheffieldML/GPy.git
commit 7550b1e5ef (parent 5a907bd013)

    fix normalizer

2 changed files with 100 additions and 4 deletions
@@ -451,6 +451,15 @@ class GP(Model):
             alpha = -2.*np.dot(kern.K(Xnew, self._predictive_variable),
                                self.posterior.woodbury_inv)
             var_jac += kern.gradients_X(alpha, Xnew, self._predictive_variable)
+
+        if self.normalizer is not None:
+            mean_jac = self.normalizer.inverse_mean(mean_jac) \
+                       - self.normalizer.inverse_mean(0.)
+            if self.output_dim > 1:
+                var_jac = self.normalizer.inverse_covariance(var_jac)
+            else:
+                var_jac = self.normalizer.inverse_variance(var_jac)
+
         return mean_jac, var_jac
 
     def predict_jacobian(self, Xnew, kern=None, full_cov=False):
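Note on the mean correction: the normalizer's inverse transform is affine, so the Jacobian of the denormalized mean is the normalized Jacobian scaled by the output standard deviation; subtracting inverse_mean(0.) cancels the additive offset. A minimal sketch of that identity, using a hypothetical Standardize stand-in rather than GPy's real normalizer API:

    import numpy as np

    # Hypothetical stand-in for an affine normalizer: y = y_norm * std + mu.
    class Standardize:
        def __init__(self, mu, std):
            self.mu, self.std = mu, std
        def inverse_mean(self, m):
            return m * self.std + self.mu

    norm = Standardize(mu=2.0, std=3.0)
    mean_jac = np.array([0.5, -1.2])        # gradient of the normalized mean

    direct = norm.std * mean_jac            # true Jacobian: the offset drops out
    via_inverse = norm.inverse_mean(mean_jac) - norm.inverse_mean(0.)
    assert np.allclose(direct, via_inverse)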
@@ -711,11 +720,59 @@ class GP(Model):
         mu_star, var_star = self._raw_predict(x_test)
         return self.likelihood.log_predictive_density_sampling(y_test, mu_star, var_star, Y_metadata=Y_metadata, num_samples=num_samples)
 
-    def posterior_covariance_between_points(self, X1, X2):
+    def _raw_posterior_covariance_between_points(self, X1, X2):
         """
-        Computes the posterior covariance between points.
+        Computes the posterior covariance between points. Does not account for
+        normalization or likelihood.
 
         :param X1: some input observations
         :param X2: other input observations
+
+        :returns:
+            cov: raw posterior covariance: k(X1,X2) - k(X1,X) G^{-1} k(X,X2)
         """
         return self.posterior.covariance_between_points(self.kern, self.X, X1, X2)
 
+    def posterior_covariance_between_points(self, X1, X2, Y_metadata=None,
+                                            likelihood=None,
+                                            include_likelihood=True):
+        """
+        Computes the posterior covariance between points. Includes likelihood
+        variance as well as normalization, so that evaluation at (x, x) is
+        consistent with model.predict.
+
+        :param X1: some input observations
+        :param X2: other input observations
+        :param Y_metadata: metadata about the predicting point to pass to the
+            likelihood
+        :param likelihood: optional likelihood to use in place of
+            self.likelihood
+        :param include_likelihood: Whether or not to add likelihood noise to
+            the predicted underlying latent function f.
+        :type include_likelihood: bool
+
+        :returns:
+            cov: posterior covariance, a Numpy array, Nnew x Nnew if
+                self.output_dim == 1, and Nnew x Nnew x self.output_dim
+                otherwise.
+        """
+        cov = self._raw_posterior_covariance_between_points(X1, X2)
+
+        if include_likelihood:
+            # Predict latent mean and push through likelihood
+            mean, _ = self._raw_predict(X1, full_cov=True)
+            if likelihood is None:
+                likelihood = self.likelihood
+            _, cov = likelihood.predictive_values(mean, cov, full_cov=True,
+                                                  Y_metadata=Y_metadata)
+
+        if self.normalizer is not None:
+            if self.output_dim > 1:
+                cov = self.normalizer.inverse_covariance(cov)
+            else:
+                cov = self.normalizer.inverse_variance(cov)
+
+        return cov
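The split is deliberate: _raw_posterior_covariance_between_points returns the latent-function covariance only, while the public method now agrees with model.predict. A minimal usage sketch mirroring the new test further down (shapes and data are illustrative):

    import numpy as np
    import GPy

    X = np.random.rand(15, 3)
    Y = np.random.rand(15, 1)
    model = GPy.models.GPRegression(X=X, Y=Y, normalizer=True)

    x = np.random.rand(2, 3)
    raw = model._raw_posterior_covariance_between_points(x, x)  # latent only
    cov = model.posterior_covariance_between_points(x, x)       # + noise, denormalized
    _, var = model.predict(x, full_cov=True)
    assert np.allclose(cov, var)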
@@ -1168,7 +1168,7 @@ class GradientTests(np.testing.TestCase):
         Y = np.array([[1], [2]])
         m = GPy.models.GPRegression(X1, Y, kernel=k)
 
-        result = m.posterior_covariance_between_points(X1, X2)
+        result = m._raw_posterior_covariance_between_points(X1, X2)
         expected = np.array([[0.4, 2.2], [1.0, 1.0]]) / 3.0
 
         self.assertTrue(np.allclose(result, expected))
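The expected matrix is the raw formula from the new docstring, k(X1,X2) - k(X1,X) G^{-1} k(X,X2), where G = K(X,X) + noise_variance * I in the Gaussian-likelihood case. A hedged cross-check of that formula on a fresh model (the RBF kernel and data here are assumptions, not the kernel `k` this test uses):

    import numpy as np
    import GPy

    k = GPy.kern.RBF(input_dim=1)
    X = np.array([[0.], [1.], [2.]])
    m = GPy.models.GPRegression(X, np.sin(X), kernel=k)

    X1, X2 = np.array([[0.5]]), np.array([[1.5]])
    # Direct linear algebra: k(X1,X2) - k(X1,X) G^{-1} k(X,X2)
    G = k.K(X) + m.likelihood.variance * np.eye(X.shape[0])
    direct = k.K(X1, X2) - k.K(X1, X) @ np.linalg.solve(G, k.K(X, X2))
    assert np.allclose(m._raw_posterior_covariance_between_points(X1, X2), direct)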
@@ -1179,7 +1179,7 @@ class GradientTests(np.testing.TestCase):
         m = _create_missing_data_model(k, Q)
 
         with self.assertRaises(RuntimeError):
-            m.posterior_covariance_between_points(np.array([[1], [2]]), np.array([[3], [4]]))
+            m._raw_posterior_covariance_between_points(np.array([[1], [2]]), np.array([[3], [4]]))
 
     def test_multioutput_model_with_derivative_observations(self):
         f = lambda x: np.sin(x)+0.1*(x-2.)**2-0.005*x**3
@@ -1242,6 +1242,45 @@ class GradientTests(np.testing.TestCase):
 
         self.assertTrue(m.checkgrad())
 
+    def test_predictive_gradients_with_normalizer(self):
+        """
+        Check that model.predictive_gradients returns the gradients of
+        model.predict when normalizer=True
+        """
+        N, M, Q = 10, 15, 3
+        X = np.random.rand(M, Q)
+        Y = np.random.rand(M, 1)
+        x = np.random.rand(N, Q)
+        model = GPy.models.GPRegression(X=X, Y=Y, normalizer=True)
+        from GPy.models import GradientChecker
+        gm = GradientChecker(lambda x: model.predict(x)[0],
+                             lambda x: model.predictive_gradients(x)[0],
+                             x, 'x')
+        gc = GradientChecker(lambda x: model.predict(x)[1],
+                             lambda x: model.predictive_gradients(x)[1],
+                             x, 'x')
+        assert gm.checkgrad()
+        assert gc.checkgrad()
+
+    def test_posterior_covariance_between_points_with_normalizer(self):
+        """
+        Check that model.posterior_covariance_between_points returns
+        the covariance from model.predict when normalizer=True
+        """
+        np.random.seed(3)
+        N, M, Q = 10, 15, 3
+        X = np.random.rand(M, Q)
+        Y = np.random.rand(M, 1)
+        x = np.random.rand(2, Q)
+        model = GPy.models.GPRegression(X=X, Y=Y, normalizer=True)
+
+        c1 = model.posterior_covariance_between_points(x, x)
+        c2 = model.predict(x, full_cov=True)[1]
+        np.testing.assert_allclose(c1, c2)
+
 def _create_missing_data_model(kernel, Q):
     D1, D2, D3, N, num_inducing = 13, 5, 8, 400, 3
     _, _, Ylist = GPy.examples.dimensionality_reduction._simulate_matern(D1, D2, D3, N, num_inducing, False)
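Both new tests lean on GPy.models.GradientChecker to compare analytic gradients against numerical ones. For intuition, a self-contained finite-difference version of the same kind of check (independent of GPy; check_grad is a hypothetical helper, not GPy API):

    import numpy as np

    def check_grad(f, df, x, eps=1e-6, tol=1e-4):
        """Compare analytic gradient df against central differences of scalar f."""
        numeric = np.zeros_like(x)
        for i in range(x.size):
            step = np.zeros_like(x)
            step.flat[i] = eps
            numeric.flat[i] = (f(x + step) - f(x - step)) / (2 * eps)
        return np.allclose(numeric, df(x), atol=tol)

    x0 = np.random.rand(4)
    assert check_grad(lambda x: np.sum(x**2), lambda x: 2 * x, x0)  # gradient is 2x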