Lots of tidying in the inference section

This commit is contained in:
James Hensman 2013-12-04 17:58:02 +00:00
parent 435cbbc421
commit f5bae4450f
3 changed files with 80 additions and 15 deletions

View file

@@ -1,13 +1,53 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
-def exact_gaussian_inference(K, likelihood, Y, Y_metadata=None):
-    Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance(Y, Y_metadata))
-    alpha, _ = dpotrs(LW, YYT_factor, lower=1)
-    dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)
-    log_marginal = (-0.5 * Y.size * np.log(2.*np.pi) -
-                    0.5 * Y.shape[1] * W_logdet + np.sum(np.square(alpha)))
+import numpy as np
+from posterior import Posterior
+from ...util.linalg import pdinv, dpotrs, tdot
+
+log_2_pi = np.log(2*np.pi)
+
+
+class exact_gaussian_inference(object):
+    """
+    An object for inference when the likelihood is Gaussian.
+
+    The function self.inference returns a Posterior object, which summarizes
+    the posterior.
+
+    For efficiency, we sometimes work with the Cholesky factor of Y*Y.T. To
+    save repeatedly recomputing this, we cache it.
+    """
+    def __init__(self):
+        self._YYTfactor_cache = caching.cache()
+
+    def get_YYTfactor(self, Y):
+        """
+        Find a matrix L which satisfies L L^T = Y Y^T.
+        Note that L may have fewer columns than Y.
+        """
+        N, D = Y.shape
+        if N > D:
+            return Y
+        else:
+            # if Y is in the cache, return the cached factor; else compute the
+            # factor, store it in the cache and return it.
+            raise NotImplementedError('TODO')
+
+    def inference(self, K, likelihood, Y, Y_metadata=None):
+        """
+        Returns a Posterior class containing essential quantities of the posterior.
+        """
+        YYT_factor = self.get_YYTfactor(Y)
+        Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance(Y, Y_metadata))
+        alpha, _ = dpotrs(LW, YYT_factor, lower=1)
+        dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)
+        log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor))
+        dL_dtheta_lik = likelihood.dL_dtheta(np.diag(dL_dK))
+        return Posterior(log_marginal, dL_dK, dL_dtheta_lik, LW, alpha, K)
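
Aside (not part of the commit): a minimal numpy sketch of the YY^T-factor idea described in the docstring above. It assumes only that the data enter the inference terms through Y*Y.T, so when D > N an N x N factor L with L L^T = Y Y^T can stand in for Y; all names below are illustrative.

    import numpy as np

    rng = np.random.default_rng(0)
    N, D = 5, 100
    Y = rng.standard_normal((N, D))

    # an N x N factor of Y Y^T (what get_YYTfactor would cache when D > N)
    L = np.linalg.cholesky(Y @ Y.T)
    assert np.allclose(L @ L.T, Y @ Y.T)

    # the data-dependent term in inference() is unchanged by the substitution:
    # trace(W^{-1} Y Y^T) computed from Y equals the same trace computed from L
    W = np.eye(N) + 0.1 * np.ones((N, N))   # stand-in for K + likelihood covariance
    term_from_Y = np.sum(np.linalg.solve(W, Y) * Y)
    term_from_L = np.sum(np.linalg.solve(W, L) * L)
    assert np.allclose(term_from_Y, term_from_L)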

View file

@@ -1,33 +1,58 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 import numpy as np
+from ...util.linalg import dtrtrs, tdot
+
 
 class Posterior(object):
     """
     An object to represent a Gaussian posterior over latent function values.
-    """
-    def __init__(self, log_marginal, dL_dmean=None, cov=None, prec=None):
-        self._log_marginal = log_marginal
-        #TODO: accept the init arguments, make sure we've got enough information to compute everything.
+    This may be computed exactly for Gaussian likelihoods, or approximated for
+    non-Gaussian likelihoods.
+
+    The purpose of this class is to serve as an interface between the
+    inference schemes and the model classes.
+    """
+    def __init__(self, log_marginal, dLM_dK, dLM_dtheta_lik, woodbury_chol, woodbury_mean, K):
+        """
+        log_marginal : log p(Y|X)
+        dLM_dK : d/dK log p(Y|X)
+        dLM_dtheta_lik : d/dtheta log p(Y|X) (where theta are the parameters of the likelihood)
+        woodbury_chol : a lower triangular matrix L that satisfies posterior_covariance = K - K L^{-T} L^{-1} K
+        woodbury_mean : a matrix (or vector, as an Nx1 matrix) M which satisfies posterior_mean = K M
+        K : the prior covariance (required for lazy computation of various quantities)
+        """
+        self.log_marginal = log_marginal
+        self.dLM_dK = dLM_dK
+        self.dLM_dtheta_lik = dLM_dtheta_lik
+        self._woodbury_chol = woodbury_chol
+        self._woodbury_mean = woodbury_mean
+        self._K = K
+
+        # these are computed lazily below
+        self._mean = None
+        self._covariance = None
+        self._precision = None

     @property
     def mean(self):
         if self._mean is None:
-            self._mean = ??
+            self._mean = np.dot(self._K, self._woodbury_mean)
         return self._mean

     @property
     def covariance(self):
         if self._covariance is None:
-            self._covariance = ??
+            LiK, _ = dtrtrs(self._woodbury_chol, self._K, lower=1)
+            self._covariance = self._K - tdot(LiK.T)
         return self._covariance

     @property
     def precision(self):
         if self._precision is None:
-            self._precision = ??
+            self._precision = np.linalg.inv(self.covariance)
         return self._precision

+    @prop
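
Aside (not part of the commit): a small numpy check of the Woodbury parameterisation documented in __init__ above, for the exact-Gaussian case where woodbury_chol is the Cholesky factor of K plus an (assumed homoscedastic) noise covariance and woodbury_mean is that matrix's inverse applied to Y. All names and values below are illustrative.

    import numpy as np

    rng = np.random.default_rng(1)
    N = 6
    A = rng.standard_normal((N, N))
    K = A @ A.T + N * np.eye(N)            # an SPD stand-in for the prior covariance
    s2 = 0.5                               # assumed Gaussian noise variance
    Y = rng.standard_normal((N, 1))

    W = K + s2 * np.eye(N)
    woodbury_chol = np.linalg.cholesky(W)  # lower triangular L with L L^T = W
    woodbury_mean = np.linalg.solve(W, Y)  # M = W^{-1} Y

    # posterior mean K M and covariance K - K L^{-T} L^{-1} K, as in the docstring
    post_mean = K @ woodbury_mean
    LiK = np.linalg.solve(woodbury_chol, K)   # L^{-1} K
    post_cov = K - LiK.T @ LiK

    # these agree with the textbook GP-regression posterior for Gaussian noise
    assert np.allclose(post_mean, K @ np.linalg.solve(W, Y))
    assert np.allclose(post_cov, K - K @ np.linalg.solve(W, K))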

Binary file not shown.