Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-06 10:32:39 +02:00
[ep] now calling exact inference instead of copying code
parent 79bfbfc776
commit 8132084de6
7 changed files with 56 additions and 75 deletions
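The idea of the commit, as a minimal numpy sketch (illustrative names, not GPy's internals; GPy does the same algebra via pdinv/dpotrs from GPy.util.linalg): once EP has converged site parameters mu_tilde and tau_tilde, its Gaussian approximation is exactly GP regression on the pseudo-targets mu_tilde with per-site noise variances 1/tau_tilde, so EP can delegate instead of duplicating the code.

import numpy as np

def exact_gaussian_posterior(K, y, noise_variance):
    # Plain GP regression posterior for y = f + eps, eps_i ~ N(0, noise_variance_i).
    n = K.shape[0]
    Ky = K + np.diag(np.broadcast_to(noise_variance, (n,)).astype(float)) + 1e-8*np.eye(n)
    L = np.linalg.cholesky(Ky)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))   # Ky^{-1} y, the "woodbury vector"
    log_marginal = (-0.5*np.dot(y, alpha) - np.sum(np.log(np.diag(L)))
                    - 0.5*n*np.log(2*np.pi))
    return alpha, log_marginal

def ep_posterior(K, mu_tilde, tau_tilde):
    # The point of the commit: after EP converges, what remains is plain GP
    # regression on the site means with noise 1/tau_tilde, so call through.
    return exact_gaussian_posterior(K, mu_tilde, 1.0/tau_tilde)

In the diff below this shows up as EP.inference ending in a single super() call into ExactGaussianInference.inference.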
@@ -98,7 +98,7 @@ class GP(Model):
                 inference_method = exact_gaussian_inference.ExactGaussianInference()
             else:
                 inference_method = expectation_propagation.EP()
-            print("defaulting to ", inference_method, "for latent function inference")
+            print("defaulting to " + str(inference_method) + " for latent function inference")
         self.inference_method = inference_method

         logger.info("adding kernel and likelihood as parameters")
@@ -28,8 +28,8 @@ class DTC(LatentFunctionInference):
         num_data, output_dim = Y.shape

         #make sure the noise is not hetero
-        beta = 1./likelihood.gaussian_variance(Y_metadata)
-        if beta.size > 1:
+        gaussian_variance = 1./likelihood.gaussian_variance(Y_metadata)
+        if gaussian_variance.size > 1:
             raise NotImplementedError("no hetero noise with this implementation of DTC")

         Kmm = kern.K(Z)

@@ -42,7 +42,7 @@ class DTC(LatentFunctionInference):
         Kmmi, L, Li, _ = pdinv(Kmm)

         # Compute A
-        LiUTbeta = np.dot(Li, U.T)*np.sqrt(beta)
+        LiUTbeta = np.dot(Li, U.T)*np.sqrt(gaussian_variance)
         A = tdot(LiUTbeta) + np.eye(num_inducing)

         # factor A

@@ -50,7 +50,7 @@ class DTC(LatentFunctionInference):

         # back substutue to get b, P, v
         tmp, _ = dtrtrs(L, Uy, lower=1)
-        b, _ = dtrtrs(LA, tmp*beta, lower=1)
+        b, _ = dtrtrs(LA, tmp*gaussian_variance, lower=1)
         tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
         tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)

@@ -59,8 +59,8 @@ class DTC(LatentFunctionInference):
         #compute log marginal
         log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
                        -np.sum(np.log(np.diag(LA)))*output_dim + \
-                       0.5*num_data*output_dim*np.log(beta) + \
-                       -0.5*beta*np.sum(np.square(Y)) + \
+                       0.5*num_data*output_dim*np.log(gaussian_variance) + \
+                       -0.5*gaussian_variance*np.sum(np.square(Y)) + \
                        0.5*np.sum(np.square(b))

         # Compute dL_dKmm

@@ -70,11 +70,11 @@ class DTC(LatentFunctionInference):
         # Compute dL_dU
         vY = np.dot(v.reshape(-1,1),Y.T)
         dL_dU = vY - np.dot(vvT_P, U.T)
-        dL_dU *= beta
+        dL_dU *= gaussian_variance

         #compute dL_dR
         Uv = np.dot(U, v)
-        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*beta**2
+        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./gaussian_variance + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*gaussian_variance**2

         dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
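For reference, the log marginal assembled in the @@ -59,8 hunk above transcribes to the following (our reading of the code, not part of the commit), writing $N$ = num_data, $D$ = output_dim, and $\beta$ for the precision 1./likelihood.gaussian_variance(Y_metadata) that this commit renames:

$$\log p(Y) = -\frac{ND}{2}\log 2\pi \;-\; D\sum_i \log (L_A)_{ii} \;+\; \frac{ND}{2}\log\beta \;-\; \frac{\beta}{2}\sum_{n,d} Y_{nd}^2 \;+\; \frac{1}{2}\lVert b\rVert^2$$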
@@ -97,8 +97,8 @@ class vDTC(object):
         num_data, output_dim = Y.shape

         #make sure the noise is not hetero
-        beta = 1./likelihood.gaussian_variance(Y_metadata)
-        if beta.size > 1:
+        gaussian_variance = 1./likelihood.gaussian_variance(Y_metadata)
+        if gaussian_variance.size > 1:
             raise NotImplementedError("no hetero noise with this implementation of DTC")

         Kmm = kern.K(Z)

@@ -111,9 +111,9 @@ class vDTC(object):
         Kmmi, L, Li, _ = pdinv(Kmm)

         # Compute A
-        LiUTbeta = np.dot(Li, U.T)*np.sqrt(beta)
+        LiUTbeta = np.dot(Li, U.T)*np.sqrt(gaussian_variance)
         A_ = tdot(LiUTbeta)
-        trace_term = -0.5*(np.sum(Knn)*beta - np.trace(A_))
+        trace_term = -0.5*(np.sum(Knn)*gaussian_variance - np.trace(A_))
         A = A_ + np.eye(num_inducing)

         # factor A

@@ -121,7 +121,7 @@ class vDTC(object):

         # back substutue to get b, P, v
         tmp, _ = dtrtrs(L, Uy, lower=1)
-        b, _ = dtrtrs(LA, tmp*beta, lower=1)
+        b, _ = dtrtrs(LA, tmp*gaussian_variance, lower=1)
         tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
         tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)

@@ -131,8 +131,8 @@ class vDTC(object):
         #compute log marginal
         log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
                        -np.sum(np.log(np.diag(LA)))*output_dim + \
-                       0.5*num_data*output_dim*np.log(beta) + \
-                       -0.5*beta*np.sum(np.square(Y)) + \
+                       0.5*num_data*output_dim*np.log(gaussian_variance) + \
+                       -0.5*gaussian_variance*np.sum(np.square(Y)) + \
                        0.5*np.sum(np.square(b)) + \
                        trace_term

@@ -145,15 +145,15 @@ class vDTC(object):
         vY = np.dot(v.reshape(-1,1),Y.T)
         #dL_dU = vY - np.dot(vvT_P, U.T)
         dL_dU = vY - np.dot(vvT_P - Kmmi, U.T)
-        dL_dU *= beta
+        dL_dU *= gaussian_variance

         #compute dL_dR
         Uv = np.dot(U, v)
-        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*beta**2
-        dL_dR -= beta*trace_term/num_data
+        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./gaussian_variance + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*gaussian_variance**2
+        dL_dR -= gaussian_variance*trace_term/num_data

         dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
-        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*beta, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}
+        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*gaussian_variance, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}

         #construct a posterior object
         post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
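The only substantive difference from DTC above is trace_term. Since A_ = tdot(np.dot(Li, U.T)*np.sqrt(beta)) is $\beta\, L^{-1} U^\top U L^{-\top}$ with $U = K_{nm}$ and $LL^\top = K_{mm}$, the term transcribes (our reading, same $\beta$ as above) to

$$-\frac{\beta}{2}\left(\sum_n K_{nn} - \operatorname{tr}\!\left(K_{mm}^{-1} K_{mn} K_{nm}\right)\right),$$

which appears to be the Titsias-style trace correction that turns the DTC marginal likelihood into a variational lower bound.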
@@ -22,21 +22,7 @@ class ExactGaussianInference(LatentFunctionInference):
     def __init__(self):
         pass#self._YYTfactor_cache = caching.cache()

-    def get_YYTfactor(self, Y):
-        """
-        find a matrix L which satisfies LL^T = YY^T.
-
-        Note that L may have fewer columns than Y, else L=Y.
-        """
-        N, D = Y.shape
-        if (N>D):
-            return Y
-        else:
-            #if Y in self.cache, return self.Cache[Y], else store Y in cache and return L.
-            #print "WARNING: N>D of Y, we need caching of L, such that L*L^T = Y, returning Y still!"
-            return Y
-
-    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None):
+    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, gaussian_variance=None):
         """
         Returns a Posterior class containing essential quantities of the posterior
         """

@@ -46,13 +32,17 @@ class ExactGaussianInference(LatentFunctionInference):
         else:
             m = mean_function.f(X)

+        if gaussian_variance is None:
+            gaussian_variance = likelihood.gaussian_variance(Y_metadata)
+
-        YYT_factor = self.get_YYTfactor(Y-m)
+        YYT_factor = Y-m

-        K = kern.K(X)
+        if K is None:
+            K = kern.K(X)

         Ky = K.copy()
-        diag.add(Ky, likelihood.gaussian_variance(Y_metadata)+1e-8)
+        diag.add(Ky, gaussian_variance+1e-8)

         Wi, LW, LWi, W_logdet = pdinv(Ky)

         alpha, _ = dpotrs(LW, YYT_factor, lower=1)
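A usage sketch for the two keyword arguments these hunks add (K and gaussian_variance). The kernel/likelihood setup and the module path GPy.inference.latent_function_inference are assumptions about the surrounding GPy layout, not shown in the commit:

import numpy as np
import GPy

X = np.linspace(0, 1, 10)[:, None]
Y = np.sin(X)
kern = GPy.kern.RBF(1)
likelihood = GPy.likelihoods.Gaussian()

inf = GPy.inference.latent_function_inference.ExactGaussianInference()
K = kern.K(X)  # computed once by the caller and handed in
post, log_marginal, grads = inf.inference(
    kern, X, likelihood, Y,
    K=K,                                  # skips the internal K = kern.K(X)
    gaussian_variance=np.full(10, 0.1))   # overrides likelihood.gaussian_variance(...)

Passing K avoids recomputing the kernel matrix when the caller already has it, and gaussian_variance lets a caller inject per-point noise variances, which is how EP hands over its site variances 1./tau_tilde below.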
@@ -3,11 +3,11 @@
 import numpy as np
 from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs
 from .posterior import Posterior
-from . import LatentFunctionInference
+from . import ExactGaussianInference
 from ...util import diag
 log_2_pi = np.log(2*np.pi)

-class EP(LatentFunctionInference):
+class EP(ExactGaussianInference):
     def __init__(self, epsilon=1e-6, eta=1., delta=1.):
         """
         The expectation-propagation algorithm.

@@ -20,6 +20,7 @@ class EP(LatentFunctionInference):
         :param delta: damping EP updates factor.
         :type delta: float64
         """
+        super(EP, self).__init__()
         self.epsilon, self.eta, self.delta = epsilon, eta, delta
         self.reset()

@@ -34,12 +35,12 @@ class EP(LatentFunctionInference):
         # TODO: update approximation in the end as well? Maybe even with a switch?
         pass

-    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, Z=None):
-        assert mean_function is None, "inference with a mean function not implemented"
+    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, gaussian_variance=None, K=None):
         num_data, output_dim = Y.shape
         assert output_dim ==1, "ep in 1D only (for now!)"

-        K = kern.K(X)
+        if K is None:
+            K = kern.K(X)

         if self._ep_approximation is None:
             #if we don't yet have the results of runnign EP, run EP and store the computed factors in self._ep_approximation

@@ -48,17 +49,7 @@ class EP(LatentFunctionInference):
             #if we've already run EP, just use the existing approximation stored in self._ep_approximation
             mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation

-        Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde))
-
-        alpha, _ = dpotrs(LW, mu_tilde, lower=1)
-
-        log_marginal = 0.5*(-num_data * log_2_pi - W_logdet - np.sum(alpha * mu_tilde)) # TODO: add log Z_hat??
-
-        dL_dK = 0.5 * (tdot(alpha[:,None]) - Wi)
-
-        dL_dthetaL = np.zeros(likelihood.size)#TODO: derivatives of the likelihood parameters
-
-        return Posterior(woodbury_inv=Wi, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}
+        return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, gaussian_variance=1./tau_tilde, K=K)

     def expectation_propagation(self, K, Y, likelihood, Y_metadata):
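The deleted block above was a line-for-line copy of the exact Gaussian inference algebra. The one-line replacement works because EP's Gaussian approximation has, by construction, the form

$$q(\mathbf{f}) \;\propto\; \mathcal{N}(\mathbf{f};\, \mathbf{0},\, K)\, \prod_i \mathcal{N}\!\left(f_i;\, \tilde{\mu}_i,\, \tilde{\tau}_i^{-1}\right),$$

i.e. the exact posterior of a GP with Gaussian pseudo-observations $\tilde{\mu}$ and heteroscedastic noise variances $1/\tilde{\tau}$. Those are exactly the arguments forwarded above as mu_tilde[:,None], gaussian_variance=1./tau_tilde, and K=K.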
@@ -46,7 +46,7 @@ class EPDTC(VarDTC):
         return super(EPDTC, self).inference(kern, X, Z, likelihood, mu_tilde,
                                             mean_function=mean_function,
                                             Y_metadata=Y_metadata,
-                                            beta=tau_tilde,
+                                            gaussian_variance=tau_tilde,
                                             Lm=Lm, dL_dKmm=dL_dKmm,
                                             psi0=psi0, psi1=psi1, psi2=psi2)
@@ -64,7 +64,7 @@ class VarDTC(LatentFunctionInference):
     def get_VVTfactor(self, Y, prec):
         return Y * prec # TODO chache this, and make it effective

-    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, mean_function=None, beta=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, mean_function=None, gaussian_variance=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
         assert mean_function is None, "inference with a mean function not implemented"

         num_data, output_dim = Y.shape

@@ -72,16 +72,16 @@ class VarDTC(LatentFunctionInference):

         uncertain_inputs = isinstance(X, VariationalPosterior)

-        if beta is None:
+        if gaussian_variance is None:
             #assume Gaussian likelihood
-            beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), self.const_jitter)
+            gaussian_variance = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), self.const_jitter)

-        if beta.ndim == 1:
-            beta = beta[:, None]
-        het_noise = beta.size > 1
+        if gaussian_variance.ndim == 1:
+            gaussian_variance = gaussian_variance[:, None]
+        het_noise = gaussian_variance.size > 1

-        VVT_factor = beta*Y
-        #VVT_factor = beta*Y
+        VVT_factor = gaussian_variance*Y
+        #VVT_factor = gaussian_variance*Y
         trYYT = self.get_trYYT(Y)

         # kernel computations, using BGPLVM notation

@@ -98,16 +98,16 @@ class VarDTC(LatentFunctionInference):
                 psi1 = kern.psi1(Z, X)
             if het_noise:
                 if psi2 is None:
-                    psi2_beta = (kern.psi2n(Z, X) * beta[:, :, None]).sum(0)
+                    psi2_beta = (kern.psi2n(Z, X) * gaussian_variance[:, :, None]).sum(0)
                 else:
-                    psi2_beta = (psi2 * beta[:, :, None]).sum(0)
+                    psi2_beta = (psi2 * gaussian_variance[:, :, None]).sum(0)
             else:
                 if psi2 is None:
-                    psi2_beta = kern.psi2(Z,X) * beta
+                    psi2_beta = kern.psi2(Z,X) * gaussian_variance
                 elif psi2.ndim == 3:
-                    psi2_beta = psi2.sum(0) * beta
+                    psi2_beta = psi2.sum(0) * gaussian_variance
                 else:
-                    psi2_beta = psi2 * beta
+                    psi2_beta = psi2 * gaussian_variance
             LmInv = dtrtri(Lm)
             A = LmInv.dot(psi2_beta.dot(LmInv.T))
         else:

@@ -116,9 +116,9 @@ class VarDTC(LatentFunctionInference):
             if psi1 is None:
                 psi1 = kern.K(X, Z)
             if het_noise:
-                tmp = psi1 * (np.sqrt(beta))
+                tmp = psi1 * (np.sqrt(gaussian_variance))
             else:
-                tmp = psi1 * (np.sqrt(beta))
+                tmp = psi1 * (np.sqrt(gaussian_variance))
             tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
             A = tdot(tmp) #print A.sum()

@@ -144,19 +144,19 @@ class VarDTC(LatentFunctionInference):
         dL_dKmm = backsub_both_sides(Lm, delit)

         # derivatives of L w.r.t. psi
-        dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm,
+        dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, gaussian_variance, Lm,
                                                         VVT_factor, Cpsi1Vf, DBi_plus_BiPBi,
                                                         psi1, het_noise, uncertain_inputs)

         # log marginal likelihood
-        log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
+        log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, gaussian_variance, het_noise,
                                                         psi0, A, LB, trYYT, data_fit, Y)

         #noise derivatives
         dL_dR = _compute_dL_dR(likelihood,
                                het_noise, uncertain_inputs, LB,
                                _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
-                               psi0, psi1, beta,
+                               psi0, psi1, gaussian_variance,
                                data_fit, num_data, output_dim, trYYT, Y, VVT_factor)

         dL_dthetaL = likelihood.exact_inference_gradients(dL_dR,Y_metadata)

@@ -181,7 +181,7 @@ class VarDTC(LatentFunctionInference):
         else:
             print('foobar')
             import ipdb; ipdb.set_trace()
-            psi1V = np.dot(Y.T*beta, psi1).T
+            psi1V = np.dot(Y.T*gaussian_variance, psi1).T
             tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
             tmp, _ = dpotrs(LB, tmp, lower=1)
             woodbury_vector, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
@@ -258,4 +258,4 @@ class Bernoulli(Likelihood):
         return Ysim.reshape(orig_shape)

     def exact_inference_gradients(self, dL_dKdiag,Y_metadata=None):
-        pass
+        return np.zeros(self.size)