mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-15 06:52:39 +02:00
Merge pull request #350 from SheffieldML/fixed_inputs
Fixed inputs and BGPLVM prediction tests
This commit is contained in:
commit
b1e7ab8c34
4 changed files with 113 additions and 5 deletions
|
|
@ -235,8 +235,6 @@ def plot_density(self, plot_limits=None, fixed_inputs=None,
|
||||||
|
|
||||||
Give the Y_metadata in the predict_kw if you need it.
|
Give the Y_metadata in the predict_kw if you need it.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
|
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
|
||||||
:type plot_limits: np.array
|
:type plot_limits: np.array
|
||||||
:param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
|
:param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
#===============================================================================
|
#===============================================================================
|
||||||
# Copyright (c) 2015, Max Zwiessele
|
# Copyright (c) 2016, Max Zwiessele, Alan Saul
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# Redistribution and use in source and binary forms, with or without
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
|
@ -117,3 +117,42 @@ def align_subplot_array(axes,xlim=None, ylim=None):
|
||||||
ax.set_xticks([])
|
ax.set_xticks([])
|
||||||
else:
|
else:
|
||||||
removeUpperTicks(ax)
|
removeUpperTicks(ax)
|
||||||
|
|
||||||
|
def fixed_inputs(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False):
    """
    Convenience function for returning back fixed_inputs where the other inputs
    are fixed using fix_routine.

    :param model: model
    :type model: Model
    :param non_fixed_inputs: dimensions of non fixed inputs
    :type non_fixed_inputs: list
    :param fix_routine: fixing routine to use, 'mean', 'median', 'zero'
    :type fix_routine: string
    :param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix
    :type as_list: boolean
    :param X_all: if true, read inputs from model.X_all instead of model.X (only in the plain-input branch)
    :type X_all: boolean
    """
    from ...inference.latent_function_inference.posterior import VariationalPosterior
    f_inputs = []
    # Uncertain (variational) inputs keep their locations in X.mean; plain
    # models expose X (or X_all) directly as an array-like.
    if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
        X = model.X.mean.values.copy()
    elif isinstance(model.X, VariationalPosterior):
        X = model.X.values.copy()
    else:
        if X_all:
            X = model.X_all.copy()
        else:
            X = model.X.copy()

    for i in range(X.shape[1]):
        if i not in non_fixed_inputs:
            # BUG FIX: the routines must be mutually exclusive. Previously
            # 'mean' appended the mean tuple and then ALSO fell into the
            # zero `else` (which only guarded the 'median' test), appending
            # (i, 0) a second time for the same dimension.
            if fix_routine == 'mean':
                f_inputs.append((i, np.mean(X[:, i])))
            elif fix_routine == 'median':
                f_inputs.append((i, np.median(X[:, i])))
            else:  # set to zero
                f_inputs.append((i, 0))
            if not as_list:
                # Overwrite the fixed column in-place with its fixed value.
                X[:, i] = f_inputs[-1][1]
    if as_list:
        return f_inputs
    else:
        return X
|
||||||
|
|
|
||||||
|
|
@ -148,6 +148,28 @@ class MiscTests(unittest.TestCase):
|
||||||
assert(gc.checkgrad())
|
assert(gc.checkgrad())
|
||||||
assert(gc2.checkgrad())
|
assert(gc2.checkgrad())
|
||||||
|
|
||||||
|
def test_predict_uncertain_inputs(self):
    """Pushing a Gaussian through a linear map stays Gaussian with moments
    that are analytic, so BGPLVM predictions can be checked exactly here."""
    inputs = np.linspace(-5, 5, 10)[:, None]
    targets = 2 * inputs + np.random.randn(*inputs.shape) * 1e-3
    model = GPy.models.BayesianGPLVM(targets, 1, X=inputs, kernel=GPy.kern.Linear(1), num_inducing=1)
    # Pin the latent space to the true inputs with tiny noise/variance.
    model.Gaussian_noise[:] = 1e-4
    model.X.mean[:] = inputs[:]
    model.X.variance[:] = 1e-5
    model.X.fix()
    model.optimize()

    test_mu = np.random.randn(5, 1)
    test_var = np.random.rand(5, 1) + 1e-5
    from GPy.core.parameterization.variational import NormalPosterior
    test_posterior = NormalPosterior(test_mu, test_var)

    # mu = \int 2x q(x|mu,S) dx = 2.mu
    # S  = \int (2x)^2 q(x) dx - (2.mu)^2 = 4(mu^2 + S) - 4.mu^2 = 4S
    expected_mu = 2 * test_mu
    expected_var = 4 * test_var
    predicted_mu, predicted_var = model._raw_predict(test_posterior)
    np.testing.assert_allclose(expected_mu, predicted_mu, rtol=1e-4)
    np.testing.assert_allclose(expected_var, predicted_var, rtol=1e-4)
|
||||||
|
|
||||||
def test_sparse_raw_predict(self):
|
def test_sparse_raw_predict(self):
|
||||||
k = GPy.kern.RBF(1)
|
k = GPy.kern.RBF(1)
|
||||||
m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k)
|
m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k)
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,5 @@
|
||||||
#===============================================================================
|
#===============================================================================
|
||||||
# Copyright (c) 2016, Max Zwiessele
|
# Copyright (c) 2016, Max Zwiessele, Alan Saul
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# Redistribution and use in source and binary forms, with or without
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
|
@ -46,4 +46,53 @@ class TestDebug(unittest.TestCase):
|
||||||
self.assertFalse(checkFullRank(tdot(array), name='test'))
|
self.assertFalse(checkFullRank(tdot(array), name='test'))
|
||||||
|
|
||||||
array = np.random.normal(0, 1, (25,25))
|
array = np.random.normal(0, 1, (25,25))
|
||||||
self.assertTrue(checkFullRank(tdot(array)))
|
self.assertTrue(checkFullRank(tdot(array)))
|
||||||
|
|
||||||
|
def test_fixed_inputs_median(self):
    """ test fixed_inputs convenience function """
    from GPy.plotting.matplot_dep.util import fixed_inputs
    import GPy
    data_X = np.random.randn(10, 3)
    data_Y = np.sin(data_X) + np.random.randn(10, 3) * 1e-3
    model = GPy.models.GPRegression(data_X, data_Y)
    fixed = fixed_inputs(model, [1], fix_routine='median', as_list=True, X_all=False)
    # Every dimension except 1 should be fixed at its column median.
    for dim in (0, 2):
        self.assertTrue((dim, np.median(data_X[:, dim])) in fixed)
    self.assertTrue(len([t for t in fixed if t[0] == 1]) == 0)  # Unfixed input should not be in fixed
|
||||||
|
|
||||||
|
def test_fixed_inputs_mean(self):
    """fixed_inputs with fix_routine='mean' fixes columns at their means."""
    from GPy.plotting.matplot_dep.util import fixed_inputs
    import GPy
    data_X = np.random.randn(10, 3)
    data_Y = np.sin(data_X) + np.random.randn(10, 3) * 1e-3
    model = GPy.models.GPRegression(data_X, data_Y)
    fixed = fixed_inputs(model, [1], fix_routine='mean', as_list=True, X_all=False)
    # Every dimension except 1 should be fixed at its column mean.
    for dim in (0, 2):
        self.assertTrue((dim, np.mean(data_X[:, dim])) in fixed)
    self.assertTrue(len([t for t in fixed if t[0] == 1]) == 0)  # Unfixed input should not be in fixed
|
||||||
|
|
||||||
|
def test_fixed_inputs_zero(self):
    """fixed_inputs with fix_routine='zero' fixes columns at zero."""
    from GPy.plotting.matplot_dep.util import fixed_inputs
    import GPy
    data_X = np.random.randn(10, 3)
    data_Y = np.sin(data_X) + np.random.randn(10, 3) * 1e-3
    model = GPy.models.GPRegression(data_X, data_Y)
    fixed = fixed_inputs(model, [1], fix_routine='zero', as_list=True, X_all=False)
    # Every dimension except 1 should be fixed at zero.
    for dim in (0, 2):
        self.assertTrue((dim, 0.0) in fixed)
    self.assertTrue(len([t for t in fixed if t[0] == 1]) == 0)  # Unfixed input should not be in fixed
|
||||||
|
|
||||||
|
def test_fixed_inputs_uncertain(self):
    """fixed_inputs reads locations from X.mean for uncertain-input models."""
    from GPy.plotting.matplot_dep.util import fixed_inputs
    import GPy
    from GPy.core.parameterization.variational import NormalPosterior
    X_mu = np.random.randn(10, 3)
    # BUG FIX: variances must be strictly positive; np.random.randn can
    # return negative values, which makes no sense as a NormalPosterior
    # variance / BGPLVM X_variance.
    X_var = np.random.rand(10, 3) + 1e-6
    X = NormalPosterior(X_mu, X_var)
    Y = np.sin(X_mu) + np.random.randn(10, 3)*1e-3
    m = GPy.models.BayesianGPLVM(Y, X=X_mu, X_variance=X_var, input_dim=3)
    fixed = fixed_inputs(m, [1], fix_routine='median', as_list=True, X_all=False)
    self.assertTrue((0, np.median(X.mean.values[:,0])) in fixed)
    self.assertTrue((2, np.median(X.mean.values[:,2])) in fixed)
    self.assertTrue(len([t for t in fixed if t[0] == 1]) == 0) # Unfixed input should not be in fixed
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue