tests are now passing

James Hensman 2013-06-04 17:19:33 +01:00
parent 362389781b
commit edae1032c7
11 changed files with 38 additions and 34 deletions

View file

@@ -29,7 +29,7 @@ class GP(GPBase):
     """
     def __init__(self, X, likelihood, kernel, normalize_X=False):
-        super(GP, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
         self._set_params(self._get_params())
     def _set_params(self, p):
@@ -53,6 +53,10 @@ class GP(GPBase):
         tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
         self.dL_dK = 0.5 * (tmp - self.D * self.Ki)
+    def _get_params(self):
+        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
+    def _get_param_names(self):
+        return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
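
The pattern above recurs throughout the commit: each leaf constructor calls its base class's __init__ explicitly and finishes with self._set_params(self._get_params()), so derived quantities are cached once every attribute exists. A minimal, self-contained sketch of that construction pattern (class and parameter names are illustrative, not GPy's):

    import numpy as np

    class Base(object):
        def __init__(self, X):
            self.X = X
            # leaf nodes call self._set_params(self._get_params()) at the end

    class Leaf(Base):
        def __init__(self, X, variance=1.):
            Base.__init__(self, X)  # explicit base call, as in this commit
            self.variance = variance
            self._set_params(self._get_params())  # cache derived state once
        def _get_params(self):
            return np.array([self.variance])
        def _set_params(self, p):
            self.variance = float(p[0])
            self.K = self.variance * np.eye(self.X.shape[0])  # recomputed here

    m = Leaf(np.random.randn(5, 1))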

View file

@@ -28,14 +28,11 @@ class GPBase(model.model):
         self._Xmean = np.zeros((1,self.Q))
         self._Xstd = np.ones((1,self.Q))
-        super(GPBase, self).__init__()
+        model.model.__init__(self)
         # All leaf nodes should call self._set_params(self._get_params()) at
         # the end
-    def _get_params(self):
-        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
     def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False):
         """
         Plot the GP's view of the world, where the data is normalized and the
@@ -77,8 +74,6 @@ class GPBase(model.model):
             ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None])))
             ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
             pb.ylim(ymin, ymax)
-            if hasattr(self, 'Z'):
-                pb.plot(self.Z, self.Z * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
         elif self.X.shape[1] == 2:
             resolution = resolution or 50

View file

@@ -19,7 +19,7 @@ import pdb
 class model(parameterised):
     def __init__(self):
         parameterised.__init__(self)
-        self.priors = [None for i in range(self._get_params().size)]
+        self.priors = None
         self.optimization_runs = []
         self.sampling_runs = []
         self.preferred_optimizer = 'tnc'
@@ -33,13 +33,13 @@ class model(parameterised):
     def _log_likelihood_gradients(self):
         raise NotImplementedError, "this needs to be implemented to use the model class"
-    def set_prior(self, which, what):
+    def set_prior(self, regexp, what):
         """
         Sets priors on the model parameters.
         Arguments
         ---------
-        which -- string, regexp, or integer array
+        regexp -- string, regexp, or integer array
         what -- instance of a prior class
         Notes
@@ -51,8 +51,10 @@ class model(parameterised):
         For tied parameters, the prior will only be "counted" once, thus
         a prior object is only inserted on the first tied index
         """
+        if self.priors is None:
+            self.priors = [None for i in range(self._get_params().size)]
-        which = self.grep_param_names(which)
+        which = self.grep_param_names(regexp)
         # check tied situation
         tie_partial_matches = [tie for tie in self.tied_indices if (not set(tie).isdisjoint(set(which))) & (not set(tie) == set(which))]
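
With the rename, the first argument is still fed to self.grep_param_names, so both plain names and regular expressions match. Hypothetical usage (the model m, the parameter names and the prior object are placeholders; the prior only needs the lnpdf, lnpdf_grad and rvs methods used elsewhere in this class):

    m.set_prior('rbf_variance', some_prior)   # plain name, matched as a regexp
    m.set_prior('.*lengthscale', some_prior)  # any regexp over parameter names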
@@ -104,10 +106,15 @@ class model(parameterised):
     def log_prior(self):
         """evaluate the prior"""
-        return np.sum([p.lnpdf(x) for p, x in zip(self.priors, self._get_params()) if p is not None])
+        if self.priors is not None:
+            return np.sum([p.lnpdf(x) for p, x in zip(self.priors, self._get_params()) if p is not None])
+        else:
+            return 0.
     def _log_prior_gradients(self):
         """evaluate the gradients of the priors"""
+        if self.priors is None:
+            return 0.
         x = self._get_params()
         ret = np.zeros(x.size)
         [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]
@@ -135,7 +142,8 @@ class model(parameterised):
         self._set_params_transformed(x)
         # now draw from prior where possible
         x = self._get_params()
-        [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
+        if self.priors is not None:
+            [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
         self._set_params(x)
         self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
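
The three guards above (in set_prior, log_prior/_log_prior_gradients, and randomize) implement one lazy-initialisation pattern: self.priors stays None until set_prior first allocates it, so model.__init__ no longer has to call self._get_params() before a subclass has finished building its parameter vector. A self-contained sketch of the pattern (not GPy code):

    import numpy as np

    class LazyPriors(object):
        def __init__(self):
            self.priors = None  # deferred: parameter count unknown here

        def set_prior(self, index, prior, n_params):
            if self.priors is None:  # allocate on first use
                self.priors = [None for i in range(n_params)]
            self.priors[index] = prior

        def log_prior(self, params):
            if self.priors is None:  # no priors set: contribute nothing
                return 0.
            return np.sum([p.lnpdf(x) for p, x in zip(self.priors, params)
                           if p is not None])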
@@ -234,16 +242,13 @@ class model(parameterised):
         Gets the gradients from the likelihood and the priors.
         """
         self._set_params_transformed(x)
-        LL_gradients = self._transform_gradients(self._log_likelihood_gradients())
-        prior_gradients = self._transform_gradients(self._log_prior_gradients())
-        return -LL_gradients - prior_gradients
+        obj_grads = - self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
+        return obj_grads
     def objective_and_gradients(self, x):
         self._set_params_transformed(x)
         obj_f = -self.log_likelihood() - self.log_prior()
-        LL_gradients = self._transform_gradients(self._log_likelihood_gradients())
-        prior_gradients = self._transform_gradients(self._log_prior_gradients())
-        obj_grads = -LL_gradients - prior_gradients
+        obj_grads = - self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
         return obj_f, obj_grads
     def optimize(self, optimizer=None, start=None, **kwargs):
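
Folding the two _transform_gradients calls into one is only valid if the transform is linear in the gradient; that holds under the assumption (consistent with the chain-rule scaling such transforms apply) that it acts elementwise on the gradient vector. A quick numerical check of the identity relied on, with an elementwise jac standing in for the real transform:

    import numpy as np

    def transform_gradients(g, jac):
        return g * jac  # stand-in: elementwise chain-rule scaling

    g_lik, g_prior = np.random.randn(4), np.random.randn(4)
    jac = np.random.rand(4)
    separate = transform_gradients(g_lik, jac) + transform_gradients(g_prior, jac)
    combined = transform_gradients(g_lik + g_prior, jac)
    assert np.allclose(separate, combined)  # T(a) + T(b) == T(a + b)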

View file

@@ -4,7 +4,6 @@
 import numpy as np
 import pylab as pb
 from ..util.linalg import mdot, jitchol, tdot, symmetrify, backsub_both_sides, chol_inv
-from GP import GP
 from scipy import linalg
 from ..likelihoods import Gaussian
 from gp_base import GPBase
@@ -30,7 +29,7 @@ class sparse_GP(GPBase):
     """
     def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
-        super(sparse_GP, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
         self.Z = Z
         self.M = Z.shape[0]
@@ -160,10 +159,11 @@ class sparse_GP(GPBase):
         self._computations()
     def _get_params(self):
-        return np.hstack([self.Z.flatten(), GP._get_params(self)])
+        return np.hstack([self.Z.flatten(), self.kern._get_params_transformed(), self.likelihood._get_params()])
     def _get_param_names(self):
-        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])], []) + GP._get_param_names(self)
+        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])],[])\
+            + self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
     def update_likelihood_approximation(self):
         """
@@ -282,7 +282,7 @@ class sparse_GP(GPBase):
         return mean, var, _025pm, _975pm
     def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20):
-        super(sparse_GP, self).plot(samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20)
+        GPBase.plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20)
         if self.X.shape[1] == 1:
             Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now
             if self.has_uncertain_inputs:
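
Dropping GP._get_params(self) here (together with the from GP import GP line above) fixes more than style: sparse_GP descends from GPBase, not GP, and under Python 2's unbound-method rules a cross-hierarchy call like that raises a TypeError. A two-class reproduction of the failure mode:

    class A(object):
        def f(self):
            return 'a'

    class B(object):  # note: not a subclass of A
        def g(self):
            return A.f(self)

    try:
        B().g()
    except TypeError, e:
        # Python 2: "unbound method f() must be called with A instance
        # as first argument (got B instance instead)"
        print e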

View file

@@ -26,13 +26,13 @@ class GPLVM(GP):
     :type init: 'PCA'|'random'
     """
-    def __init__(self, Y, Q, init='PCA', X = None, kernel=None, normalize_Y=False, **kwargs):
+    def __init__(self, Y, Q, init='PCA', X = None, kernel=None, normalize_Y=False):
         if X is None:
             X = self.initialise_latent(init, Q, Y)
         if kernel is None:
             kernel = kern.rbf(Q, ARD=Q>1) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
         likelihood = Gaussian(Y, normalize=normalize_Y)
-        super(GPLVM, self).__init__(self, X, likelihood, kernel, **kwargs)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=False)
         self._set_params(self._get_params())
     def initialise_latent(self, init, Q, Y):
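
The removed line passed self through an already-bound super() call, so the base initialiser received self twice and every later argument shifted one slot (X bound to the model itself, likelihood bound to X, and so on). A minimal reproduction of that silent mis-assignment:

    class Base(object):
        def __init__(self, X, likelihood):
            self.X, self.likelihood = X, likelihood

    class Broken(Base):
        def __init__(self, X, likelihood):
            # bound call already supplies self; passing it again shifts args
            super(Broken, self).__init__(self, X)

    b = Broken('data', 'gauss')
    print b.X           # <__main__.Broken object ...> -- shifted!
    print b.likelihood  # 'data'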

View file

@@ -31,5 +31,5 @@ class GP_regression(GP):
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)
-        super(GP_regression, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
         self._set_params(self._get_params())

View file

@@ -40,7 +40,7 @@ class generalized_FITC(sparse_GP):
         self.M = self.Z.shape[0]
         self.true_precision = likelihood.precision
-        super(generalized_FITC, self).__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False)
+        super(generalized_FITC, self).__init__(X, likelihood, kernel=kernel, Z=self.Z, X_variance=X_variance, normalize_X=normalize_X)
         self._set_params(self._get_params())
     def _set_params(self, p):

View file

@@ -23,9 +23,9 @@ class sparse_GPLVM(sparse_GP_regression, GPLVM):
     :type init: 'PCA'|'random'
     """
-    def __init__(self, Y, Q, init='PCA', **kwargs):
+    def __init__(self, Y, Q, kernel=None, init='PCA', M=10):
         X = self.initialise_latent(init, Q, Y)
-        sparse_GP_regression.__init__(self, X, Y, **kwargs)
+        sparse_GP_regression.__init__(self, X, Y, kernel=kernel,M=M)
     def _get_param_names(self):
         return (sum([['X_%i_%i'%(n,q) for q in range(self.Q)] for n in range(self.N)],[])

View file

@@ -43,5 +43,5 @@ class sparse_GP_regression(sparse_GP):
         #likelihood defaults to Gaussian
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)
-        super(sparse_GP_regression, self).__init__(self, X, likelihood, kernel, Z, normalize_X=normalize_X)
+        sparse_GP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
         self._set_params(self._get_params())

View file

@@ -29,7 +29,7 @@ class warpedGP(GP):
         self.predict_in_warped_space = False
         likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y)
-        super(warpedGP, self).__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
         self._set_params(self._get_params())
     def _scale_data(self, Y):

View file

@@ -171,7 +171,7 @@ class GradientTests(unittest.TestCase):
         kernel = GPy.kern.rbf(1)
         distribution = GPy.likelihoods.likelihood_functions.probit()
         likelihood = GPy.likelihoods.EP(Y, distribution)
-        m = GPy.models.GP(X, likelihood, kernel)
+        m = GPy.core.GP(X, likelihood, kernel)
         m.ensure_default_constraints()
         m.update_likelihood_approximation()
         self.assertTrue(m.checkgrad())
@@ -185,7 +185,7 @@ class GradientTests(unittest.TestCase):
         kernel = GPy.kern.rbf(1)
         distribution = GPy.likelihoods.likelihood_functions.probit()
         likelihood = GPy.likelihoods.EP(Y, distribution)
-        m = GPy.models.sparse_GP(X, likelihood, kernel,Z)
+        m = GPy.core.sparse_GP(X, likelihood, kernel,Z)
         m.ensure_default_constraints()
         m.update_likelihood_approximation()
         self.assertTrue(m.checkgrad())
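
As the two test fixes show, the GP and sparse_GP classes are now exposed under GPy.core rather than GPy.models, so downstream code needs the same one-line change (sketch, reusing the X, Y, Z, likelihood and kernel set up above):

    m = GPy.core.GP(X, likelihood, kernel)            # was GPy.models.GP
    m = GPy.core.sparse_GP(X, likelihood, kernel, Z)  # was GPy.models.sparse_GP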