Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-15 06:52:39 +02:00

Commit 601175de2d: very weird merge conflict, including in files that I did not change
73 changed files with 2234 additions and 1567 deletions
@@ -16,8 +16,8 @@ If the likelihood object is something other than Gaussian, then exact inference
is not tractable. We then resort to a Laplace approximation (laplace.py) or
expectation propagation (ep.py).

The inference methods return a
:class:`~GPy.inference.latent_function_inference.posterior.Posterior`
The inference methods return a
:class:`~GPy.inference.latent_function_inference.posterior.Posterior`
instance, which is a simple
structure which contains a summary of the posterior. The model classes can then
use this posterior object for making predictions, optimizing hyper-parameters,
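As a rough sketch of the contract this docstring describes, the snippet below drives one of the inference objects by hand. The import path and the inference() call signature follow this diff (the package __init__ and the commented-out abstract base class in the next hunk); the surrounding model setup is illustrative and the exact names may differ from the released API.

    import numpy as np
    import GPy  # assumes a GPy checkout from around this commit

    X = np.linspace(0., 10., 50)[:, None]
    Y = np.sin(X) + 0.1 * np.random.randn(50, 1)

    # A GPRegression model supplies ready-made kern and likelihood objects.
    m = GPy.models.GPRegression(X, Y)

    from GPy.inference.latent_function_inference import ExactGaussianInference
    posterior, log_marginal, grad_dict = ExactGaussianInference().inference(
        m.kern, X, m.likelihood, Y)

    # posterior   : Posterior summary (Woodbury vector/Cholesky plus K)
    # log_marginal: scalar log marginal likelihood
    # grad_dict   : e.g. {'dL_dK': ..., 'dL_dthetaL': ...}, which the model
    #               distributes to the kernel and likelihood gradient updates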
@@ -27,19 +27,19 @@ etc.

from exact_gaussian_inference import ExactGaussianInference
from laplace import Laplace
expectation_propagation = 'foo' # TODO
from GPy.inference.latent_function_inference.var_dtc import VarDTC
from expectation_propagation import EP
from dtc import DTC
from fitc import FITC

# class FullLatentFunctionData(object):
#
#
#
#
# class LatentFunctionInference(object):
# def inference(self, kern, X, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
# inputs and outputs `X` and `Y`, and a likelihood `likelihood`.
# inputs and outputs `X` and `Y`, and a likelihood `likelihood`.
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
# raise NotImplementedError, "Abstract base class for full inference"
@@ -19,7 +19,7 @@ class DTC(object):
def __init__(self):
self.const_jitter = 1e-6

def inference(self, kern, X, X_variance, Z, likelihood, Y):
def inference(self, kern, X, Z, likelihood, Y):
assert X_variance is None, "cannot use X_variance with DTC. Try varDTC."

#TODO: MAX! fix this!

@@ -40,7 +40,7 @@ class DTC(object):
U = Knm
Uy = np.dot(U.T,Y)

#factor Kmm
#factor Kmm
Kmmi, L, Li, _ = pdinv(Kmm)

# Compute A

@@ -78,11 +78,9 @@ class DTC(object):
Uv = np.dot(U, v)
dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*beta**2

grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T}
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)

#update gradients
kern.update_gradients_sparse(X=X, Z=Z, **grad_dict)
likelihood.update_gradients(dL_dR)
grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}

#construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)

@@ -158,11 +156,8 @@ class vDTC(object):
dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*beta**2
dL_dR -=beta*trace_term/num_data

grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*beta, 'dL_dKnm':dL_dU.T}

#update gradients
kern.update_gradients_sparse(X=X, Z=Z, **grad_dict)
likelihood.update_gradients(dL_dR)
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*beta, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}

#construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
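The change repeated across the sparse-GP hunks above is that the in-place gradient updates (kern.update_gradients_sparse, likelihood.update_gradients) move out of the inference routines, which instead return a 'dL_dthetaL' entry in grad_dict. A minimal sketch of the model-side glue this implies follows; the call names are taken from the removed lines here and from the Laplace hunk further down, but the wrapper itself is hypothetical, not a verbatim excerpt of GPy:

    def apply_sparse_gradients(inference_method, kern, X, Z, likelihood, Y):
        # Run inference; signature as in DTC.inference above.
        posterior, log_marginal, grad_dict = inference_method.inference(
            kern, X, Z, likelihood, Y)
        # Kernel gradients: everything except the likelihood term.
        kern.update_gradients_sparse(X=X, Z=Z,
                                     dL_dKmm=grad_dict['dL_dKmm'],
                                     dL_dKdiag=grad_dict['dL_dKdiag'],
                                     dL_dKnm=grad_dict['dL_dKnm'])
        # Likelihood (noise) gradients now arrive pre-computed as dL_dthetaL.
        likelihood.update_gradients(grad_dict['dL_dthetaL'])
        return posterior, log_marginal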
@@ -3,6 +3,7 @@

from posterior import Posterior
from ...util.linalg import pdinv, dpotrs, tdot
from ...util import diag
import numpy as np
log_2_pi = np.log(2*np.pi)

@@ -41,7 +42,9 @@ class ExactGaussianInference(object):

K = kern.K(X)

Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance_matrix(Y, Y_metadata))
Ky = K.copy()
diag.add(Ky, likelihood.gaussian_variance(Y, Y_metadata))
Wi, LW, LWi, W_logdet = pdinv(Ky)

alpha, _ = dpotrs(LW, YYT_factor, lower=1)

@@ -49,9 +52,6 @@ class ExactGaussianInference(object):

dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)

#TODO: does this really live here?
likelihood.update_gradients(np.diag(dL_dK))

return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK}

dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata)

return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}
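For orientation, the quantities assembled in this exact-inference hunk are the standard GP-regression identities (textbook results stated for context, not taken from the diff). Writing K_y = K + diag(sigma_n^2) for the Ky built above, with N data points and D output columns:

    \alpha = K_y^{-1} Y
    \log p(Y) = -\tfrac{1}{2}\operatorname{tr}\!\left(Y^{\top} K_y^{-1} Y\right) - \tfrac{D}{2}\log\lvert K_y \rvert - \tfrac{ND}{2}\log 2\pi
    \frac{\partial \log p(Y)}{\partial K} = \tfrac{1}{2}\left(\alpha\alpha^{\top} - D\,K_y^{-1}\right)

which matches dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi) above, with Wi = K_y^{-1}, D = Y.shape[1], and YYT_factor standing in for Y.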
@@ -1,7 +1,7 @@
import numpy as np
from scipy import stats
from ..util.linalg import pdinv,mdot,jitchol,chol_inv,DSYR,tdot,dtrtrs
from likelihood import likelihood
from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs
from posterior import Posterior
log_2_pi = np.log(2*np.pi)

class EP(object):
def __init__(self, epsilon=1e-6, eta=1., delta=1.):

@@ -28,30 +28,30 @@ class EP(object):

K = kern.K(X)

mu_tilde, tau_tilde = self.expectation_propagation()
mu, Sigma, mu_tilde, tau_tilde, Z_hat = self.expectation_propagation(K, Y, likelihood, Y_metadata)

Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde)
Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde))

alpha, _ = dpotrs(LW, mu_tilde, lower=1)

log_marginal = 0.5*(-num_data * log_2_pi - W_logdet - np.sum(alpha * mu_tilde))
log_marginal = 0.5*(-num_data * log_2_pi - W_logdet - np.sum(alpha * mu_tilde)) # TODO: add log Z_hat??

dL_dK = 0.5 * (tdot(alpha[:,None]) - Wi)

#TODO: what abot derivatives of the likelihood parameters?
dL_dthetaL = np.zeros(likelihood.size)#TODO: derivatives of the likelihood parameters

return Posterior(woodbury_inv=Wi, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK}
return Posterior(woodbury_inv=Wi, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}

def expectation_propagation(self, K, Y, Y_metadata, likelihood)
def expectation_propagation(self, K, Y, likelihood, Y_metadata):

num_data, data_dim = Y.shape
assert data_dim == 1, "This EP methods only works for 1D outputs"

#Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
mu = np.zeros(self.num_data)
mu = np.zeros(num_data)
Sigma = K.copy()

#Initial values - Marginal moments

@@ -61,33 +61,32 @@ class EP(object):

#initial values - Gaussian factors
if self.old_mutilde is None:
tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data, num_data))
tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))
else:
assert old_mutilde.size == num_data, "data size mis-match: did you change the data? try resetting!"
mu_tilde, v_tilde = self.old_mutilde, self.old_vtilde
tau_tilde = v_tilde/mu_tilde

#Approximation
epsilon_np1 = self.epsilon + 1.
epsilon_np2 = self.epsilon + 1.
tau_diff = self.epsilon + 1.
v_diff = self.epsilon + 1.
iterations = 0
while (epsilon_np1 > self.epsilon) or (epsilon_np2 > self.epsilon):
while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
update_order = np.random.permutation(num_data)
for i in update_order:
#Cavity distribution parameters
tau_cav = 1./Sigma[i,i] - self.eta*tau_tilde[i]
v_cav = mu[i]/Sigma[i,i] - self.eta*v_tilde[i]
#Marginal moments
Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match(Y[i], tau_cav, v_cav, Y_metadata=(None if Y_metadata is None else Y_metadata[i]))
Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav, v_cav)#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
#Site parameters update
delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma[i,i])
delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma[i,i])
tau_tilde[i] += delta_tau
v_tilde[i] += delta_v
#Posterior distribution parameters update
DSYR(Sigma, Sigma[:,i].copy(), -Delta_tau/(1.+ Delta_tau*Sigma[i,i]))
DSYR(Sigma, Sigma[:,i].copy(), -delta_tau/(1.+ delta_tau*Sigma[i,i]))
mu = np.dot(Sigma, v_tilde)
iterations += 1

#(re) compute Sigma and mu using full Cholesky decompy
tau_tilde_root = np.sqrt(tau_tilde)

@@ -99,10 +98,14 @@ class EP(object):
mu = np.dot(Sigma,v_tilde)

#monitor convergence
epsilon_np1 = np.mean(np.square(tau_tilde-tau_tilde_old))
epsilon_np2 = np.mean(np.square(v_tilde-v_tilde_old))
if iterations>0:
tau_diff = np.mean(np.square(tau_tilde-tau_tilde_old))
v_diff = np.mean(np.square(v_tilde-v_tilde_old))
tau_tilde_old = tau_tilde.copy()
v_tilde_old = v_tilde.copy()

return mu, Sigma, mu_tilde, tau_tilde
iterations += 1

mu_tilde = v_tilde/tau_tilde
return mu, Sigma, mu_tilde, tau_tilde, Z_hat
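For readers who prefer the inner loop above in equation form, each pass over site i performs the following update (transcribed from the code; eta and delta are the damping parameters from __init__, and \hat{Z}_i, \hat{\mu}_i, \hat{\sigma}_i^2 are the moments returned by likelihood.moments_match_ep for observation y_i and the cavity below):

    \tau_{\mathrm{cav},i} = \Sigma_{ii}^{-1} - \eta\,\tilde{\tau}_i, \qquad v_{\mathrm{cav},i} = \mu_i / \Sigma_{ii} - \eta\,\tilde{v}_i
    \Delta\tau_i = \tfrac{\delta}{\eta}\left(\hat{\sigma}_i^{-2} - \Sigma_{ii}^{-1}\right), \qquad \Delta v_i = \tfrac{\delta}{\eta}\left(\hat{\mu}_i/\hat{\sigma}_i^{2} - \mu_i/\Sigma_{ii}\right)
    \tilde{\tau}_i \leftarrow \tilde{\tau}_i + \Delta\tau_i, \qquad \tilde{v}_i \leftarrow \tilde{v}_i + \Delta v_i
    \Sigma \leftarrow \Sigma - \frac{\Delta\tau_i}{1 + \Delta\tau_i\,\Sigma_{ii}}\,\Sigma_{:,i}\,\Sigma_{:,i}^{\top}, \qquad \mu = \Sigma\,\tilde{v}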
@@ -3,6 +3,7 @@

from posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
from ...util import diag
import numpy as np
log_2_pi = np.log(2*np.pi)

@@ -14,15 +15,9 @@ class FITC(object):
the posterior.

"""
def __init__(self):
self.const_jitter = 1e-6
const_jitter = 1e-6

def inference(self, kern, X, X_variance, Z, likelihood, Y):
assert X_variance is None, "cannot use X_variance with FITC. Try varDTC."

#TODO: MAX! fix this!
from ...util.misc import param_to_array
Y = param_to_array(Y)
def inference(self, kern, X, Z, likelihood, Y):

num_inducing, _ = Z.shape
num_data, output_dim = Y.shape

@@ -37,7 +32,8 @@ class FITC(object):
Knm = kern.K(X, Z)
U = Knm

#factor Kmm
#factor Kmm
diag.add(Kmm, self.const_jitter)
Kmmi, L, Li, _ = pdinv(Kmm)

#compute beta_star, the effective noise precision

@@ -73,7 +69,7 @@ class FITC(object):
vvT_P = tdot(v.reshape(-1,1)) + P
dL_dK = 0.5*(Kmmi - vvT_P)
KiU = np.dot(Kmmi, U.T)
dL_dK += np.dot(KiU*dL_dR, KiU.T)
dL_dK += np.dot(KiU*dL_dR, KiU.T)

# Compute dL_dU
vY = np.dot(v.reshape(-1,1),Y.T)

@@ -81,11 +77,8 @@ class FITC(object):
dL_dU *= beta_star
dL_dU -= 2.*KiU*dL_dR

grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T}

#update gradients
kern.update_gradients_sparse(X=X, Z=Z, **grad_dict)
likelihood.update_gradients(dL_dR)
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}

#construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
@@ -52,15 +52,13 @@ class Laplace(object):

f_hat, Ki_fhat = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata)
self.f_hat = f_hat

self.Ki_fhat = Ki_fhat
self.K = K.copy()
#Compute hessian and other variables at mode
log_marginal, woodbury_inv, dL_dK, dL_dthetaL = self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, kern, Y_metadata)

kern.update_gradients_full(dL_dK, X)
likelihood.update_gradients(dL_dthetaL)

self._previous_Ki_fhat = Ki_fhat.copy()
return Posterior(woodbury_vector=Ki_fhat, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK}
return Posterior(woodbury_vector=Ki_fhat, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}

def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None):
"""
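For context, rasm_mode performs the mode search and mode_computations assembles the Gaussian approximation around it; the quantities involved are the standard Laplace-approximation identities (textbook results, consistent with the names f_hat and Ki_fhat = K^{-1} f_hat above, not taken from this diff):

    \hat{f} = \arg\max_{f}\left[\log p(Y \mid f) - \tfrac{1}{2} f^{\top} K^{-1} f\right], \qquad W = -\nabla\nabla_{f} \log p(Y \mid \hat{f})
    q(f \mid Y) = \mathcal{N}\!\left(\hat{f},\ (K^{-1} + W)^{-1}\right)
    \log p(Y) \approx \log p(Y \mid \hat{f}) - \tfrac{1}{2}\hat{f}^{\top} K^{-1}\hat{f} - \tfrac{1}{2}\log\lvert I + W^{1/2} K W^{1/2} \rvert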
@@ -2,7 +2,8 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)

from posterior import Posterior
from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
import numpy as np
from ...util.misc import param_to_array

@@ -28,7 +29,7 @@ class VarDTC(object):
def set_limit(self, limit):
self.get_trYYT.limit = limit
self.get_YYTfactor.limit = limit

def _get_trYYT(self, Y):
return param_to_array(np.sum(np.square(Y)))

@@ -47,7 +48,7 @@ class VarDTC(object):
def get_VVTfactor(self, Y, prec):
return Y * prec # TODO chache this, and make it effective

def inference(self, kern, X, Z, likelihood, Y):
def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
if isinstance(X, VariationalPosterior):
uncertain_inputs = True
psi0 = kern.psi0(Z, X)

@@ -64,7 +65,7 @@ class VarDTC(object):
_, output_dim = Y.shape

#see whether we've got a different noise variance for each datum
beta = 1./np.fmax(likelihood.variance, 1e-6)
beta = 1./np.fmax(likelihood.gaussian_variance(Y, Y_metadata), 1e-6)
# VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
#self.YYTfactor = self.get_YYTfactor(Y)
#VVT_factor = self.get_VVTfactor(self.YYTfactor, beta)

@@ -73,13 +74,14 @@ class VarDTC(object):
trYYT = self.get_trYYT(Y)

# do the inference:
het_noise = beta.size < 1
het_noise = beta.size > 1
num_inducing = Z.shape[0]
num_data = Y.shape[0]
# kernel computations, using BGPLVM notation
Kmm = kern.K(Z)

Lm = jitchol(Kmm+np.eye(Z.shape[0])*self.const_jitter)
Kmm = kern.K(Z).copy()
diag.add(Kmm, self.const_jitter)
Lm = jitchol(Kmm)

# The rather complex computations of A
if uncertain_inputs:

@@ -132,28 +134,28 @@ class VarDTC(object):

# log marginal likelihood
log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
psi0, A, LB, trYYT, data_fit)
psi0, A, LB, trYYT, data_fit, VVT_factor)

#put the gradients in the right places
partial_for_likelihood = _compute_partial_for_likelihood(likelihood,
dL_dR = _compute_dL_dR(likelihood,
het_noise, uncertain_inputs, LB,
_LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
psi0, psi1, beta,
data_fit, num_data, output_dim, trYYT)
data_fit, num_data, output_dim, trYYT, Y)

#likelihood.update_gradients(partial_for_likelihood)
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR,Y_metadata)

if uncertain_inputs:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dpsi0':dL_dpsi0,
'dL_dpsi1':dL_dpsi1,
'dL_dpsi2':dL_dpsi2,
'partial_for_likelihood':partial_for_likelihood}
'dL_dthetaL':dL_dthetaL}
else:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dKdiag':dL_dpsi0,
'dL_dKnm':dL_dpsi1,
'partial_for_likelihood':partial_for_likelihood}
'dL_dthetaL':dL_dthetaL}

#get sufficient things for posterior prediction
#TODO: do we really want to do this in the loop?

@@ -168,7 +170,6 @@ class VarDTC(object):
Bi, _ = dpotri(LB, lower=1)
symmetrify(Bi)
Bi = -dpotri(LB, lower=1)[0]
from ...util import diag
diag.add(Bi, 1)

woodbury_inv = backsub_both_sides(Lm, Bi)
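A brief reading of the new Bi lines above, under two assumptions about the helpers: that dpotri(LB, lower=1) returns B^{-1} given the Cholesky factor LB of B, and that backsub_both_sides(Lm, Bi) evaluates L_m^{-\top} B_i L_m^{-1}. With K_{mm} = L_m L_m^{\top}, the new code then computes

    B_i = I - B^{-1}, \qquad \mathrm{woodbury\_inv} = L_m^{-\top}\left(I - B^{-1}\right) L_m^{-1} = K_{mm}^{-1} - L_m^{-\top} B^{-1} L_m^{-1}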
@@ -207,7 +208,7 @@ class VarDTCMissingData(object):
self._subarray_indices = [[slice(None),slice(None)]]
return [Y], [(Y**2).sum()]

def inference(self, kern, X, Z, likelihood, Y):
def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
if isinstance(X, VariationalPosterior):
uncertain_inputs = True
psi0_all = kern.psi0(Z, X)

@@ -220,7 +221,7 @@ class VarDTCMissingData(object):
psi2_all = None

Ys, traces = self._Y(Y)
beta_all = 1./np.fmax(likelihood.variance, 1e-6)
beta_all = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
het_noise = beta_all.size != 1

import itertools

@@ -231,13 +232,14 @@ class VarDTCMissingData(object):
if uncertain_inputs:
dL_dpsi2_all = np.zeros((Y.shape[0], num_inducing, num_inducing))

partial_for_likelihood = 0
dL_dR = 0
woodbury_vector = np.zeros((num_inducing, Y.shape[1]))
woodbury_inv_all = np.zeros((num_inducing, num_inducing, Y.shape[1]))
dL_dKmm = 0
log_marginal = 0

Kmm = kern.K(Z)
Kmm = kern.K(Z).copy()
diag.add(Kmm, self.const_jitter)
#factor Kmm
Lm = jitchol(Kmm)
if uncertain_inputs: LmInv = dtrtri(Lm)

@@ -303,10 +305,10 @@ class VarDTCMissingData(object):

# log marginal likelihood
log_marginal += _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
psi0, A, LB, trYYT, data_fit)
psi0, A, LB, trYYT, data_fit,VVT_factor)

#put the gradients in the right places
partial_for_likelihood += _compute_partial_for_likelihood(likelihood,
dL_dR += _compute_dL_dR(likelihood,
het_noise, uncertain_inputs, LB,
_LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
psi0, psi1, beta,

@@ -323,22 +325,23 @@ class VarDTCMissingData(object):
Bi, _ = dpotri(LB, lower=1)
symmetrify(Bi)
Bi = -dpotri(LB, lower=1)[0]
from ...util import diag
diag.add(Bi, 1)
woodbury_inv_all[:, :, ind] = backsub_both_sides(Lm, Bi)[:,:,None]

dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)

# gradients:
if uncertain_inputs:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dpsi0':dL_dpsi0_all,
'dL_dpsi1':dL_dpsi1_all,
'dL_dpsi2':dL_dpsi2_all,
'partial_for_likelihood':partial_for_likelihood}
'dL_dthetaL':dL_dthetaL}
else:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dKdiag':dL_dpsi0_all,
'dL_dKnm':dL_dpsi1_all,
'partial_for_likelihood':partial_for_likelihood}
'dL_dthetaL':dL_dthetaL}

#get sufficient things for posterior prediction
#TODO: do we really want to do this in the loop?

@@ -384,40 +387,41 @@ def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, C
return dL_dpsi0, dL_dpsi1, dL_dpsi2

def _compute_partial_for_likelihood(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT):
def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT, Y):
# the partial derivative vector for the likelihood
if likelihood.size == 0:
# save computation here.
partial_for_likelihood = None
dL_dR = None
elif het_noise:
if uncertain_inputs:
raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented"
else:
from ...util.linalg import chol_inv
LBi = chol_inv(LB)
#from ...util.linalg import chol_inv
#LBi = chol_inv(LB)
LBi, _ = dtrtrs(LB,np.eye(LB.shape[0]))

Lmi_psi1, nil = dtrtrs(Lm, psi1.T, lower=1, trans=0)
_LBi_Lmi_psi1, _ = dtrtrs(LB, Lmi_psi1, lower=1, trans=0)

partial_for_likelihood = -0.5 * beta + 0.5 * likelihood.V**2
partial_for_likelihood += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2
dL_dR = -0.5 * beta + 0.5 * (beta*Y)**2
dL_dR += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2

partial_for_likelihood += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2

partial_for_likelihood += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * likelihood.Y * beta**2
partial_for_likelihood += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2
dL_dR += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2

dL_dR += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * Y * beta**2
dL_dR += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2
else:
# likelihood is not heteroscedatic
partial_for_likelihood = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2
partial_for_likelihood += 0.5 * output_dim * (psi0.sum() * beta ** 2 - np.trace(A) * beta)
partial_for_likelihood += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit)
return partial_for_likelihood
dL_dR = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2
dL_dR += 0.5 * output_dim * (psi0.sum() * beta ** 2 - np.trace(A) * beta)
dL_dR += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit)
return dL_dR

def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit):
#compute log marginal likelihood
def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit,Y):
#compute log marginal likelihood
if het_noise:
lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * np.sum(np.log(beta)) - 0.5 * np.sum(likelihood.V * likelihood.Y)
lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A))
lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * np.sum(np.log(beta)) - 0.5 * np.sum(beta * np.square(Y).sum(axis=-1))
lik_2 = -0.5 * output_dim * (np.sum(beta.flatten() * psi0) - np.trace(A))
else:
lik_1 = -0.5 * num_data * output_dim * (np.log(2. * np.pi) - np.log(beta)) - 0.5 * beta * trYYT
lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A))