Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-13 05:52:38 +02:00)

Commit ee5b05cd2d: Merge branch 'devel' of github.com:SheffieldML/GPy into devel

16 changed files with 645 additions and 272 deletions
@@ -54,6 +54,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        self._savedgradients = []
        self._savederrors = []
        self._savedpsiKmm = []
        self._savedABCD = []

        sparse_GP.__init__(self, X, Gaussian(Y), kernel, Z=Z, X_variance=X_variance, **kwargs)
@@ -135,6 +136,17 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        self._savedparams.append([self.f_call, self._get_params()])
        self._savedgradients.append([self.f_call, self._log_likelihood_gradients()])
        self._savedpsiKmm.append([self.f_call, [self.Kmm, self.dL_dKmm]])
        sf2 = self.scale_factor ** 2
        if self.likelihood.is_heteroscedastic:
            A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.V * self.likelihood.Y)
            B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A) * sf2)
        else:
            A = -0.5 * self.N * self.D * (np.log(2.*np.pi) + np.log(self.likelihood._variance)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
            B = -0.5 * self.D * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A) * sf2)
        C = -self.D * (np.sum(np.log(np.diag(self.LB))) + 0.5 * self.M * np.log(sf2))
        D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
        self._savedABCD.append([self.f_call, A, B, C, D])

        # print "\nkl:", kl, "ll:", ll
        return ll - kl
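For orientation, the four quantities stashed in _savedABCD are the additive pieces of the sparse-GP lower bound that the debug plot later draws (a reading of the code above; in the homoscedastic branch, with beta the noise precision, sigma^2 the noise variance, psi_0 the summed diagonal kernel expectation, and sigma_f^2 = scale_factor^2 -- elsewhere in this commit the scale factor is pinned to 1, so the sigma_f^2 terms drop out):

\[
A = -\tfrac{ND}{2}\bigl(\log 2\pi + \log \sigma^2\bigr) - \tfrac{\beta}{2}\,\mathrm{tr}(YY^\top),
\qquad
B = -\tfrac{D}{2}\bigl(\beta\,\psi_0 - \sigma_f^2\,\mathrm{tr}(\mathbf{A})\bigr),
\]
\[
C = -D\Bigl(\textstyle\sum_i \log (L_B)_{ii} + \tfrac{M}{2}\log \sigma_f^2\Bigr),
\qquad
D = \tfrac12\,\bigl\lVert L_B^{-1} L_m^{-1} \Psi_1 V \bigr\rVert_F^2,
\]

and the value returned is the variational objective ll - kl, i.e. the data-fit bound minus the KL divergence between q(X) and the prior over the latent points.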
@@ -169,7 +181,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        ax.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')
        return ax

    def plot_X_1d(self, fig=None, axes=None, fig_num="MRD X 1d", colors=None):
    def plot_X_1d(self, fig=None, axes=None, fig_num="LVM mu S 1d", colors=None):
        """
        Plot latent space X in 1D:

@@ -196,7 +208,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
            else:
                ax = axes[i]
            ax.plot(self.X, c='k', alpha=.3)
            plots.extend(ax.plot(self.X.T[i], c=colors.next(), label=r"$\mathbf{{X_{}}}$".format(i)))
            plots.extend(ax.plot(self.X.T[i], c=colors.next(), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
            ax.fill_between(np.arange(self.X.shape[0]),
                            self.X.T[i] - 2 * np.sqrt(self.X_variance.T[i]),
                            self.X.T[i] + 2 * np.sqrt(self.X_variance.T[i]),
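The only change in the label above is brace escaping in str.format: the old pattern loses the grouping braces around the subscript, so dimensions of 10 or more render with just their first digit subscripted. A quick check in a plain Python session (not part of the class):

    >>> r"$\mathbf{{X_{}}}$".format(10)      # old pattern
    '$\\mathbf{X_10}$'
    >>> r"$\mathbf{{X_{{{}}}}}$".format(10)  # new pattern keeps braces around the index
    '$\\mathbf{X_{10}}$'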
@@ -251,6 +263,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        gradient_dict = dict(self._savedgradients)
        kmm_dict = dict(self._savedpsiKmm)
        iters = np.array(param_dict.keys())
        ABCD_dict = np.array(self._savedABCD)
        self.showing = 0

        # ax2 = pylab.subplot2grid(splotshape, (1, 0), 2, 4)

@@ -281,14 +294,26 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
                 ha='center', va='center')
        figs[-1].canvas.draw()
        figs[-1].tight_layout(rect=(.15, 0, 1, .86))
        # figs.append(pylab.figure("BGPLVM DEBUG Kmm", figsize=(12, 6)))
        # fig = figs[-1]
        # ax6 = fig.add_subplot(121)
        # ax6.text(.5, .5, r"${\mathbf{K}_{mm}}$", color='magenta', alpha=.5, transform=ax6.transAxes,
        # ha='center', va='center')
        # ax7 = fig.add_subplot(122)
        # ax7.text(.5, .5, r"${\frac{dL}{dK_{mm}}}$", color='magenta', alpha=.5, transform=ax7.transAxes,
        # ha='center', va='center')
        figs.append(pylab.figure("BGPLVM DEBUG Kmm", figsize=(12, 6)))
        fig = figs[-1]
        ax6 = fig.add_subplot(121)
        ax6.text(.5, .5, r"${\mathbf{K}_{mm}}$", color='magenta', alpha=.5, transform=ax6.transAxes,
                 ha='center', va='center')
        ax7 = fig.add_subplot(122)
        ax7.text(.5, .5, r"${\frac{dL}{dK_{mm}}}$", color='magenta', alpha=.5, transform=ax7.transAxes,
        ax8 = fig.add_subplot(121)
        ax8.text(.5, .5, r"${\mathbf{A,B,C,D}}$", color='k', alpha=.5, transform=ax8.transAxes,
                 ha='center', va='center')
        ax8.plot(ABCD_dict[:, 0], ABCD_dict[:, 1], label='A')
        ax8.plot(ABCD_dict[:, 0], ABCD_dict[:, 2], label='B')
        ax8.plot(ABCD_dict[:, 0], ABCD_dict[:, 3], label='C')
        ax8.plot(ABCD_dict[:, 0], ABCD_dict[:, 4], label='D')
        ax8.legend()
        figs[-1].canvas.draw()
        figs[-1].tight_layout(rect=(.15, 0, 1, .86))

        X, S, Z, theta = self._debug_filter_params(param_dict[self.showing])
        Xg, Sg, Zg, thetag = self._debug_filter_params(gradient_dict[self.showing])

@@ -330,16 +355,16 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        ax5.set_xticks(np.arange(len(theta)))
        ax5.set_xticklabels(self._get_param_names()[-len(theta):], rotation=17)

        imkmm = ax6.imshow(kmm_dict[self.showing][0])
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        divider = make_axes_locatable(ax6)
        caxkmm = divider.append_axes("right", "5%", pad="1%")
        cbarkmm = pylab.colorbar(imkmm, cax=caxkmm)

        imkmmdl = ax7.imshow(kmm_dict[self.showing][1])
        divider = make_axes_locatable(ax7)
        caxkmmdl = divider.append_axes("right", "5%", pad="1%")
        cbarkmmdl = pylab.colorbar(imkmmdl, cax=caxkmmdl)
        # imkmm = ax6.imshow(kmm_dict[self.showing][0])
        # from mpl_toolkits.axes_grid1 import make_axes_locatable
        # divider = make_axes_locatable(ax6)
        # caxkmm = divider.append_axes("right", "5%", pad="1%")
        # cbarkmm = pylab.colorbar(imkmm, cax=caxkmm)
        #
        # imkmmdl = ax7.imshow(kmm_dict[self.showing][1])
        # divider = make_axes_locatable(ax7)
        # caxkmmdl = divider.append_axes("right", "5%", pad="1%")
        # cbarkmmdl = pylab.colorbar(imkmmdl, cax=caxkmmdl)

        # Qleg = ax1.legend(Xlatentplts, [r"$Q_{}$".format(i + 1) for i in range(self.Q)],
        # loc=3, ncol=self.Q, bbox_to_anchor=(0, 1.15, 1, 1.15),

@@ -420,13 +445,13 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
            thetagrads.set_offsets(np.array([xtheta.ravel(), theta.ravel()]).T)
            thetagrads.set_UVC(Utheta, thetag)

            imkmm.set_data(kmm_dict[self.showing][0])
            imkmm.autoscale()
            cbarkmm.update_normal(imkmm)

            imkmmdl.set_data(kmm_dict[self.showing][1])
            imkmmdl.autoscale()
            cbarkmmdl.update_normal(imkmmdl)
            # imkmm.set_data(kmm_dict[self.showing][0])
            # imkmm.autoscale()
            # cbarkmm.update_normal(imkmm)
            #
            # imkmmdl.set_data(kmm_dict[self.showing][1])
            # imkmmdl.autoscale()
            # cbarkmmdl.update_normal(imkmmdl)

            ax2.relim()
            # ax3.relim()

@@ -452,4 +477,4 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        cidp = figs[0].canvas.mpl_connect('button_press_event', onclick)
        cidd = figs[0].canvas.mpl_connect('motion_notify_event', motion)

        return ax1, ax2, ax3, ax4, ax5, ax6, ax7
        return ax1, ax2, ax3, ax4, ax5 # , ax6, ax7
GPy/models/FITC.py  (new file, 248 lines)

@@ -0,0 +1,248 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import numpy as np
import pylab as pb
from ..util.linalg import mdot, jitchol, tdot, symmetrify,pdinv
#from ..util.linalg import mdot, jitchol, chol_inv, pdinv, trace_dot
from ..util.plot import gpplot
from .. import kern
from scipy import stats, linalg
from sparse_GP import sparse_GP

def backsub_both_sides(L,X):
    """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
    tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1)
    return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T
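A minimal standalone check of what backsub_both_sides computes, against the explicit L^-T X L^-1 product (plain numpy, not part of FITC.py itself; the import path is assumed from this file's location in the checkout):

    import numpy as np
    from GPy.models.FITC import backsub_both_sides  # assumed import path for this checkout

    rng = np.random.RandomState(0)
    A = rng.randn(5, 5)
    A = np.dot(A, A.T) + 5. * np.eye(5)   # symmetric positive definite test matrix
    L = np.linalg.cholesky(A)             # lower Cholesky factor
    X = rng.randn(5, 5)
    X = X + X.T                           # symmetric argument, as the docstring assumes

    # explicit L^-T X L^-1 via two triangular solves
    expected = np.linalg.solve(L.T, np.linalg.solve(L.T, X).T).T
    print np.allclose(backsub_both_sides(L, X), expected)  # -> True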
class FITC(sparse_GP):
    def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):

        self.Z = Z
        self.M = self.Z.shape[0]
        self.true_precision = likelihood.precision

        #ERASEME
        #N = likelihood.Y.size
        #self.beta_star = np.random.rand(N,1)
        #self.Kmm_ = kernel.K(self.Z).copy()
        #self.Kmmi_,a,b,c = pdinv(self.Kmm_)
        #self.psi1_ = kernel.K(self.Z,X).copy()

        sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False)

    def _set_params(self, p):
        self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
        self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam])
        self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:])
        self._compute_kernel_matrices()
        self.scale_factor = 1.
        self._computations()

    def update_likelihood_approximation(self):
        """
        Approximates a non-gaussian likelihood using Expectation Propagation

        For a Gaussian (or direct: TODO) likelihood, no iteration is required:
        this function does nothing

        Diag(Knn - Qnn) is added to the noise term to use the tools already implemented in sparse_GP.
        The true precison is now 'true_precision' not 'precision'.
        """
        if self.has_uncertain_inputs:
            raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
        else:
            self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
            self._set_params(self._get_params()) # update the GP
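In equations, the correction described in the docstring (and computed at the top of _computations below) works out as: with beta the value stored in likelihood.precision and D_0 the diagonal FITC residual,

\[
D_0 = \operatorname{diag}\bigl(K_{nn} - \Psi_1^{\top} K_{mm}^{-1}\Psi_1\bigr),
\qquad
\beta^{*}_n = \frac{\beta_n}{1 + \beta_n (D_0)_n},
\qquad
V^{*}_n = \beta^{*}_n\, y_n,
\]

so the DTC-style machinery inherited from sparse_GP can be reused with beta* in place of beta.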
    def _computations(self):

        #factor Kmm
        self.Lm = jitchol(self.Kmm)

        self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
        Lmipsi1 = np.dot(self.Lmi,self.psi1)
        self.Qnn = np.dot(Lmipsi1.T,Lmipsi1)
        self.Diag0 = self.psi0 - np.diag(self.Qnn)

        #TODO eraseme
        """
        self.psi1 = self.psi1_
        self.Lm = jitchol(self.Kmm_)
        self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
        Lmipsi1 = np.dot(self.Lmi,self.psi1)
        #self.true_psi1 = self.kern.K(self.Z,self.X)
        #self.Qnn = mdot(self.true_psi1.T,self.Lmi.T,self.Lmi,self.true_psi1)
        self.Kmmi, a,b,c = pdinv(self.Kmm)
        self.Qnn = mdot(self.psi1.T,self.Kmmi,self.psi1)
        #self.Diag0 = self.psi0 #- np.diag(self.Qnn)
        self.Diag0 = - np.diag(self.Qnn)
        #Kmmi,Lm,Lmi,logdetK = pdinv(self.Kmm)
        #self.Lambda = self.Kmmi_ + mdot(self.Kmmi_,self.psi1_,self.beta_star*self.psi1_.T,self.Kmmi_) + np.eye(self.M)*100
        #self.Lambdai, LLm, LLmi, self.logdetLambda = pdinv(self.Lambda)
        """

        #TODO uncomment
        self.beta_star = self.likelihood.precision/(1. + self.likelihood.precision*self.Diag0[:,None]) #Includes Diag0 in the precision
        self.V_star = self.beta_star * self.likelihood.Y

        # The rather complex computations of self.A
        if self.has_uncertain_inputs:
            raise NotImplementedError
        else:
            if self.likelihood.is_heteroscedastic:
                assert self.likelihood.D == 1 # TODO: what if the likelihood is heterscedatic and there are multiple independent outputs?
            tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
            tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
            self.A = tdot(tmp)

        # factor B
        self.B = np.eye(self.M) + self.A
        self.LB = jitchol(self.B)
        self.LBi,info = linalg.lapack.flapack.dtrtrs(self.LB,np.eye(self.M),lower=1)
        self.psi1V = np.dot(self.psi1, self.V_star)

        # back substutue C into psi1V
        tmp, info1 = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
        self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)

        # dlogbeta_dtheta
        Kmmipsi1 = np.dot(self.Lmi.T,Lmipsi1)
        b_psi1_Ki = self.beta_star * Kmmipsi1.T
        Ki_pbp_Ki = np.dot(Kmmipsi1,b_psi1_Ki)
        dlogB_dpsi0 = -.5*self.kern.dKdiag_dtheta(self.beta_star,X=self.X)
        dlogB_dpsi1 = self.kern.dK_dtheta(b_psi1_Ki,self.X,self.Z)
        dlogB_dKmm = -.5*self.kern.dK_dtheta(Ki_pbp_Ki,X=self.Z)
        self.dlogbeta_dtheta = dlogB_dpsi0 + dlogB_dpsi1 + dlogB_dKmm

        # dyby_dtheta
        Kmmi = np.dot(self.Lmi.T,self.Lmi)
        VVT = np.outer(self.V_star,self.V_star)
        VV_p_Ki = np.dot(VVT,Kmmipsi1.T)
        Ki_pVVp_Ki = np.dot(Kmmipsi1,VV_p_Ki)
        dyby_dpsi0 = .5 * self.kern.dKdiag_dtheta(self.V_star**2,self.X)

        dyby_dpsi1 = 0
        dyby_dKmm = 0
        dyby_dtheta = dyby_dpsi0
        for psi1_n,V_n,X_n in zip(self.psi1.T,self.V_star,self.X):
            dyby_dpsi1 = -V_n**2 * np.dot(psi1_n[None,:],Kmmi)
            dyby_dtheta += self.kern.dK_dtheta(dyby_dpsi1,X_n[:,None],self.Z)

        for psi1_n,V_n,X_n in zip(self.psi1.T,self.V_star,self.X):
            psin_K = np.dot(psi1_n[None,:],Kmmi)
            tmp = np.dot(psin_K.T,psin_K)
            dyby_dKmm = .5*V_n**2 * tmp
            dyby_dtheta += self.kern.dK_dtheta(dyby_dKmm,self.Z)
        self.dyby_dtheta = dyby_dtheta

        # dlogB_dtheta : C

        #C_B
        dC_B = -.5*Kmmi
        C_B = self.kern.dK_dtheta(dC_B,self.Z) #check

        #C_A
        LBiLmi = np.dot(self.LBi,self.Lmi)
        LBL_inv = np.dot(LBiLmi.T,LBiLmi)
        dC_AA = .5*LBL_inv
        C_AA = self.kern.dK_dtheta(dC_AA,self.Z) #check

        #C_AB
        psi1beta = self.psi1*self.beta_star.T
        dC_ABA = mdot(LBL_inv,psi1beta,Kmmipsi1.T)
        C_ABA = self.kern.dK_dtheta(dC_ABA,self.Z)
        dC_ABB = -np.dot(psi1beta.T,LBL_inv) #check
        C_ABB = self.kern.dK_dtheta(dC_ABB,self.X,self.Z) #check

        # C_ABC
        betapsi1TLmiLBi = np.dot(psi1beta.T,LBiLmi.T)
        alpha = np.array([np.dot(a.T,a) for a in betapsi1TLmiLBi])[:,None]
        dC_ABCA = .5 *alpha
        C_ABCA = self.kern.dKdiag_dtheta(dC_ABCA,self.X) #check

        C_ABCB = 0
        for psi1_n,alpha_n,X_n in zip(self.psi1.T,alpha,self.X):
            dC_ABCB_n = - alpha_n * np.dot(psi1_n[None,:],Kmmi)
            C_ABCB += self.kern.dK_dtheta(dC_ABCB_n,X_n[:,None],self.Z) #check

        C_ABCC = 0
        for psi1_n,alpha_n,X_n in zip(self.psi1.T,alpha,self.X):
            psin_K = np.dot(psi1_n[None,:],Kmmi)
            tmp = np.dot(psin_K.T,psin_K)
            dC_ABCC = .5 * alpha_n * tmp
            C_ABCC += self.kern.dK_dtheta(dC_ABCC,self.Z) #check

        self.dlogB_dtheta = C_B + C_AA + C_ABA + C_ABB + C_ABCA + C_ABCB + C_ABCC

        # the partial derivative vector for the likelihood
        if self.likelihood.Nparams == 0:
            # save computation here.
            self.partial_for_likelihood = None
        elif self.likelihood.is_heteroscedastic:
            raise NotImplementedError, "heteroscedatic derivates not implemented"
        else:
            # likelihood is not heterscedatic
            self.partial_for_likelihood = 0 #FIXME
            #self.partial_for_likelihood = -0.5 * self.N * self.D * self.likelihood.precision + 0.5 * self.likelihood.trYYT * self.likelihood.precision ** 2
            #self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision)
            #self.partial_for_likelihood += self.likelihood.precision * (0.5 * np.sum(self.A * self.DBi_plus_BiPBi) - np.sum(np.square(self._LBi_Lmi_psi1V)))

    def log_likelihood(self):
        """ Compute the (lower bound on the) log marginal likelihood """
        A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
        C = -self.D * (np.sum(np.log(np.diag(self.LB))))
        """
        A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
        #B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
        C = -self.D * (np.sum(np.log(np.diag(self.LB))))
        D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
        return A + C + D # +B
        """
        return A+C
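Written out, the two terms currently returned (the B and D terms of the sparse_GP bound are still commented out while the FITC gradients are being debugged) read, as a plain transcription of the code,

\[
A = -\tfrac{ND}{2}\log 2\pi + \tfrac12 \sum_n \log \beta^{*}_n - \tfrac12 \sum_{n,d} \beta^{*}_n\, y_{nd}^2,
\qquad
C = -D \sum_i \log (L_B)_{ii},
\]

with the bound reported as A + C.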
    def _log_likelihood_gradients(self):
        pass
        return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))

    def dL_dtheta(self):
        #dL_dtheta = self.dlogB_dtheta
        #dL_dtheta = self.dyby_dtheta
        #dL_dtheta = self.dlogbeta_dtheta + self.dyby_dtheta
        dL_dtheta = self.dlogB_dtheta
        dL_dtheta = self.dlogbeta_dtheta + self.dyby_dtheta + self.dlogB_dtheta
        """
        dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z)
        if self.has_uncertain_inputs:
            dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z, self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1.T, self.Z, self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2, self.Z, self.X, self.X_variance)
        else:
            dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.Z, self.X)
            dL_dtheta += self.kern.dKdiag_dtheta(self.dL_dpsi0, self.X)
        """
        return dL_dtheta

    def dL_dZ(self):
        dL_dZ = np.zeros(self.M)
        """
        dL_dZ = 2.*self.kern.dK_dX(self.dL_dKmm, self.Z) # factor of two becase of vertical and horizontal 'stripes' in dKmm_dZ
        if self.has_uncertain_inputs:
            dL_dZ += self.kern.dpsi1_dZ(self.dL_dpsi1, self.Z, self.X, self.X_variance)
            dL_dZ += self.kern.dpsi2_dZ(self.dL_dpsi2, self.Z, self.X, self.X_variance)
        else:
            dL_dZ += self.kern.dK_dX(self.dL_dpsi1, self.Z, self.X)
        """
        return dL_dZ
@@ -12,3 +12,4 @@ from sparse_GPLVM import sparse_GPLVM
from Bayesian_GPLVM import Bayesian_GPLVM
from mrd import MRD
from generalized_FITC import generalized_FITC
#from FITC import FITC

@@ -287,6 +287,9 @@ class MRD(model):
        else:
            return pylab.gcf()

    def plot_X_1d(self):
        return self.gref.plot_X_1d()

    def plot_X(self, fig_num="MRD Predictions", axes=None):
        fig = self._handle_plotting(fig_num, axes, lambda i, g, ax: ax.imshow(g.X))
        return fig
@@ -9,10 +9,10 @@ from .. import kern
from GP import GP
from scipy import linalg

def backsub_both_sides(L,X):
def backsub_both_sides(L, X):
    """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
    tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1)
    return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T
    tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
    return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T

class sparse_GP(GP):
    """
@@ -35,22 +35,22 @@ class sparse_GP(GP):
    """

    def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
        self.scale_factor = 100.0# a scaling factor to help keep the algorithm stable
        self.auto_scale_factor = False
        # self.scale_factor = 100.0 # a scaling factor to help keep the algorithm stable
        # self.auto_scale_factor = False
        self.Z = Z
        self.M = Z.shape[0]
        self.likelihood = likelihood

        if X_variance is None:
            self.has_uncertain_inputs=False
            self.has_uncertain_inputs = False
        else:
            assert X_variance.shape==X.shape
            self.has_uncertain_inputs=True
            assert X_variance.shape == X.shape
            self.has_uncertain_inputs = True
            self.X_variance = X_variance

        GP.__init__(self, X, likelihood, kernel=kernel, normalize_X=normalize_X)

        #normalize X uncertainty also
        # normalize X uncertainty also
        if self.has_uncertain_inputs:
            self.X_variance /= np.square(self._Xstd)
@@ -59,141 +59,152 @@ class sparse_GP(GP):
        # kernel computations, using BGPLVM notation
        self.Kmm = self.kern.K(self.Z)
        if self.has_uncertain_inputs:
            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_variance)
            self.psi1 = self.kern.psi1(self.Z,self.X, self.X_variance).T
            self.psi2 = self.kern.psi2(self.Z,self.X, self.X_variance)
            self.psi0 = self.kern.psi0(self.Z, self.X, self.X_variance)
            self.psi1 = self.kern.psi1(self.Z, self.X, self.X_variance).T
            self.psi2 = self.kern.psi2(self.Z, self.X, self.X_variance)
        else:
            self.psi0 = self.kern.Kdiag(self.X)
            self.psi1 = self.kern.K(self.Z,self.X)
            self.psi1 = self.kern.K(self.Z, self.X)
            self.psi2 = None
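For reference (standard notation for variational sparse GPs, not restated in this file): with inducing inputs Z and a Gaussian variational posterior q(X) = prod_n N(x_n | mu_n, S_n), the psi statistics used above are the kernel expectations

\[
\psi_0 = \sum_n \big\langle k(x_n, x_n)\big\rangle_{q(x_n)},\qquad
(\Psi_1)_{mn} = \big\langle k(z_m, x_n)\big\rangle_{q(x_n)},\qquad
\Psi_2^{(n)} = \big\langle k(Z, x_n)\,k(x_n, Z)\big\rangle_{q(x_n)},
\]

which reduce to Kdiag(X) and K(Z, X) (with psi2 left as None and its contribution folded into the Psi_1 terms) when the inputs are certain, exactly as in the else branch.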
    def _computations(self):
        sf = self.scale_factor
        sf2 = sf**2
        # sf = self.scale_factor
        # sf2 = sf ** 2

        #factor Kmm
        # factor Kmm
        self.Lm = jitchol(self.Kmm)

        #The rather complex computations of self.A
        # The rather complex computations of self.A
        if self.likelihood.is_heteroscedastic:
            assert self.likelihood.D == 1 #TODO: what if the likelihood is heterscedatic and there are multiple independent outputs?
            assert self.likelihood.D == 1 # TODO: what if the likelihood is heterscedatic and there are multiple independent outputs?
            if self.has_uncertain_inputs:
                psi2_beta_scaled = (self.psi2*(self.likelihood.precision.flatten().reshape(self.N,1,1)/sf2)).sum(0)
                # psi2_beta_scaled = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.N, 1, 1) / sf2)).sum(0)
                psi2_beta_scaled = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.N, 1, 1))).sum(0)
                evals, evecs = linalg.eigh(psi2_beta_scaled)
                clipped_evals = np.clip(evals,0.,1e6) # TODO: make clipping configurable
                clipped_evals = np.clip(evals, 0., 1e15) # TODO: make clipping configurable
                if not np.allclose(evals, clipped_evals):
                    print "Warning: clipping posterior eigenvalues"
                tmp = evecs*np.sqrt(clipped_evals)
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
                tmp = evecs * np.sqrt(clipped_evals)
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                self.A = tdot(tmp)
            else:
                tmp = self.psi1*(np.sqrt(self.likelihood.precision.flatten().reshape(1,self.N))/sf)
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
                # tmp = self.psi1 * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.N)) / sf)
                tmp = self.psi1 * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.N)))
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                self.A = tdot(tmp)
        else:
            if self.has_uncertain_inputs:
                psi2_beta_scaled = (self.psi2*(self.likelihood.precision/sf2)).sum(0)
                # psi2_beta_scaled = (self.psi2 * (self.likelihood.precision / sf2)).sum(0)
                psi2_beta_scaled = (self.psi2 * (self.likelihood.precision)).sum(0)
                evals, evecs = linalg.eigh(psi2_beta_scaled)
                clipped_evals = np.clip(evals,0.,1e6) # TODO: make clipping configurable
                clipped_evals = np.clip(evals, 0., 1e15) # TODO: make clipping configurable
                if not np.allclose(evals, clipped_evals):
                    print "Warning: clipping posterior eigenvalues"
                tmp = evecs*np.sqrt(clipped_evals)
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
                tmp = evecs * np.sqrt(clipped_evals)
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                self.A = tdot(tmp)
            else:
                tmp = self.psi1*(np.sqrt(self.likelihood.precision)/sf)
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
                # tmp = self.psi1 * (np.sqrt(self.likelihood.precision) / sf)
                tmp = self.psi1 * (np.sqrt(self.likelihood.precision))
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                self.A = tdot(tmp)
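Reading off the branches above: with uncertain inputs, the beta-weighted sum of Psi_2 statistics is eigendecomposed, its spectrum clipped into [0, 1e15], and A assembled through a triangular solve against the Cholesky factor of K_mm,

\[
\sum_n \beta_n \Psi_2^{(n)} = E\,\operatorname{diag}(\lambda)\,E^{\top},\qquad
T = L_m^{-1} E\,\operatorname{diag}\bigl(\sqrt{\lambda_{\mathrm{clipped}}}\bigr),\qquad
A = T T^{\top} \approx L_m^{-1}\Bigl(\sum_n \beta_n \Psi_2^{(n)}\Bigr) L_m^{-\top};
\]

with certain inputs the same A is formed directly as L_m^{-1} \Psi_1 \operatorname{diag}(\beta) \Psi_1^{\top} L_m^{-\top} (the sf / sf2 scalings are exactly what the commented-out lines are removing).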
        #factor B
        self.B = np.eye(self.M)/sf2 + self.A
        # factor B
        # self.B = np.eye(self.M) / sf2 + self.A
        self.B = np.eye(self.M) + self.A
        self.LB = jitchol(self.B)

        self.V = (self.likelihood.precision/self.scale_factor)*self.likelihood.Y
        self.psi1V = np.dot(self.psi1, self.V)
        # TODO: make a switch for either first compute psi1V, or VV.T
        self.psi1V = np.dot(self.psi1, self.likelihood.V)

        #back substutue C into psi1V
        tmp,info1 = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.psi1V),lower=1,trans=0)
        self._LBi_Lmi_psi1V,_ = linalg.lapack.flapack.dtrtrs(self.LB,np.asfortranarray(tmp),lower=1,trans=0)
        tmp,info2 = linalg.lapack.flapack.dpotrs(self.LB,tmp,lower=1)
        self.Cpsi1V,info3 = linalg.lapack.flapack.dtrtrs(self.Lm,tmp,lower=1,trans=1)
        # back substutue C into psi1V
        tmp, info1 = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
        self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)
        tmp, info2 = linalg.lapack.flapack.dpotrs(self.LB, tmp, lower=1)
        self.Cpsi1V, info3 = linalg.lapack.flapack.dtrtrs(self.Lm, tmp, lower=1, trans=1)

        # Compute dL_dKmm
        tmp = tdot(self._LBi_Lmi_psi1V)
        self.DBi_plus_BiPBi = backsub_both_sides(self.LB, self.D*np.eye(self.M) + tmp)
        tmp = -0.5*self.DBi_plus_BiPBi/sf2
        tmp += -0.5*self.B*sf2*self.D
        tmp += self.D*np.eye(self.M)
        self.dL_dKmm = backsub_both_sides(self.Lm,tmp)
        self.DBi_plus_BiPBi = backsub_both_sides(self.LB, self.D * np.eye(self.M) + tmp)
        # tmp = -0.5 * self.DBi_plus_BiPBi / sf2
        # tmp += -0.5 * self.B * sf2 * self.D
        tmp = -0.5 * self.DBi_plus_BiPBi
        tmp += -0.5 * self.B * self.D
        tmp += self.D * np.eye(self.M)
        self.dL_dKmm = backsub_both_sides(self.Lm, tmp)

        # Compute dL_dpsi # FIXME: this is untested for the heterscedastic + uncertain inputs case
        self.dL_dpsi0 = - 0.5 * self.D * (self.likelihood.precision * np.ones([self.N,1])).flatten()
        self.dL_dpsi1 = np.dot(self.Cpsi1V,self.V.T)
        dL_dpsi2_beta = 0.5*backsub_both_sides(self.Lm,self.D*np.eye(self.M) - self.DBi_plus_BiPBi)
        self.dL_dpsi0 = -0.5 * self.D * (self.likelihood.precision * np.ones([self.N, 1])).flatten()
        self.dL_dpsi1 = np.dot(self.Cpsi1V, self.likelihood.V.T)
        dL_dpsi2_beta = 0.5 * backsub_both_sides(self.Lm, self.D * np.eye(self.M) - self.DBi_plus_BiPBi)
        if self.likelihood.is_heteroscedastic:
            if self.has_uncertain_inputs:
                self.dL_dpsi2 = self.likelihood.precision[:,None,None]*dL_dpsi2_beta[None,:,:]
                self.dL_dpsi2 = self.likelihood.precision[:, None, None] * dL_dpsi2_beta[None, :, :]
            else:
                self.dL_dpsi1 += 2.*np.dot(dL_dpsi2_beta,self.psi1*self.likelihood.precision.reshape(1,self.N))
                self.dL_dpsi1 += 2.*np.dot(dL_dpsi2_beta, self.psi1 * self.likelihood.precision.reshape(1, self.N))
                self.dL_dpsi2 = None
        else:
            dL_dpsi2 = self.likelihood.precision*dL_dpsi2_beta
            dL_dpsi2 = self.likelihood.precision * dL_dpsi2_beta
            if self.has_uncertain_inputs:
                #repeat for each of the N psi_2 matrices
                self.dL_dpsi2 = np.repeat(dL_dpsi2[None,:,:],self.N,axis=0)
                # repeat for each of the N psi_2 matrices
                self.dL_dpsi2 = np.repeat(dL_dpsi2[None, :, :], self.N, axis=0)
            else:
                #subsume back into psi1 (==Kmn)
                self.dL_dpsi1 += 2.*np.dot(dL_dpsi2,self.psi1)
                # subsume back into psi1 (==Kmn)
                self.dL_dpsi1 += 2.*np.dot(dL_dpsi2, self.psi1)
                self.dL_dpsi2 = None

        #the partial derivative vector for the likelihood
        if self.likelihood.Nparams ==0:
            #save computation here.
        # the partial derivative vector for the likelihood
        if self.likelihood.Nparams == 0:
            # save computation here.
            self.partial_for_likelihood = None
        elif self.likelihood.is_heteroscedastic:
            raise NotImplementedError, "heteroscedatic derivates not implemented"
        else:
            #likelihood is not heterscedatic
            self.partial_for_likelihood = - 0.5 * self.N*self.D*self.likelihood.precision + 0.5 * self.likelihood.trYYT*self.likelihood.precision**2
            self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum()*self.likelihood.precision**2 - np.trace(self.A)*self.likelihood.precision*sf2)
            self.partial_for_likelihood += self.likelihood.precision*(0.5*np.sum(self.A*self.DBi_plus_BiPBi) - np.sum(np.square(self._LBi_Lmi_psi1V)))
            # likelihood is not heterscedatic
            self.partial_for_likelihood = -0.5 * self.N * self.D * self.likelihood.precision + 0.5 * self.likelihood.trYYT * self.likelihood.precision ** 2
            # self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision * sf2)
            self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision)
            self.partial_for_likelihood += self.likelihood.precision * (0.5 * np.sum(self.A * self.DBi_plus_BiPBi) - np.sum(np.square(self._LBi_Lmi_psi1V)))

    def log_likelihood(self):
        """ Compute the (lower bound on the) log marginal likelihood """
        sf2 = self.scale_factor**2
        # sf2 = self.scale_factor ** 2
        if self.likelihood.is_heteroscedastic:
            A = -0.5*self.N*self.D*np.log(2.*np.pi) +0.5*np.sum(np.log(self.likelihood.precision)) -0.5*np.sum(self.V*self.likelihood.Y)
            B = -0.5*self.D*(np.sum(self.likelihood.precision.flatten()*self.psi0) - np.trace(self.A)*sf2)
            A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
            # B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A) * sf2)
            B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
        else:
            A = -0.5*self.N*self.D*(np.log(2.*np.pi) + np.log(self.likelihood._variance)) -0.5*self.likelihood.precision*self.likelihood.trYYT
            B = -0.5*self.D*(np.sum(self.likelihood.precision*self.psi0) - np.trace(self.A)*sf2)
        C = -self.D * (np.sum(np.log(np.diag(self.LB))) + 0.5*self.M*np.log(sf2))
        D = 0.5*np.sum(np.square(self._LBi_Lmi_psi1V))
        return A+B+C+D
            A = -0.5 * self.N * self.D * (np.log(2.*np.pi) + np.log(self.likelihood._variance)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
            # B = -0.5 * self.D * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A) * sf2)
            B = -0.5 * self.D * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
        # C = -self.D * (np.sum(np.log(np.diag(self.LB))) + 0.5 * self.M * np.log(sf2))
        C = -self.D * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.M * np.log(sf2))
        D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
        return A + B + C + D

    def _set_params(self, p):
        self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
        self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam])
        self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:])
        self.Z = p[:self.M * self.Q].reshape(self.M, self.Q)
        self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam])
        self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:])
        self._compute_kernel_matrices()
        #if self.auto_scale_factor:
        # if self.auto_scale_factor:
        # self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
        #if self.auto_scale_factor:
        #if self.likelihood.is_heteroscedastic:
        #self.scale_factor = max(100,np.sqrt(self.psi2_beta_scaled.sum(0).mean()))
        #else:
        #self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
        self.scale_factor = 1.
        # if self.auto_scale_factor:
        # if self.likelihood.is_heteroscedastic:
        # self.scale_factor = max(100,np.sqrt(self.psi2_beta_scaled.sum(0).mean()))
        # else:
        # self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
        # self.scale_factor = 100.
        self._computations()

    def _get_params(self):
        return np.hstack([self.Z.flatten(),GP._get_params(self)])
        return np.hstack([self.Z.flatten(), GP._get_params(self)])

    def _get_param_names(self):
        return sum([['iip_%i_%i'%(i,j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])],[]) + GP._get_param_names(self)
        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])], []) + GP._get_param_names(self)

    def update_likelihood_approximation(self):
        """

@@ -205,9 +216,9 @@ class sparse_GP(GP):
        if self.has_uncertain_inputs:
            raise NotImplementedError, "EP approximation not implemented for uncertain inputs"
        else:
            self.likelihood.fit_DTC(self.Kmm,self.psi1)
            #self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
            self._set_params(self._get_params()) # update the GP
            self.likelihood.fit_DTC(self.Kmm, self.psi1)
            # self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
            self._set_params(self._get_params()) # update the GP

    def _log_likelihood_gradients(self):
@@ -217,13 +228,13 @@ class sparse_GP(GP):
        """
        Compute and return the derivative of the log marginal likelihood wrt the parameters of the kernel
        """
        dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm,self.Z)
        dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z)
        if self.has_uncertain_inputs:
            dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z,self.X,self.X_variance)
            dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1.T,self.Z,self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2, self.Z,self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z, self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1.T, self.Z, self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2, self.Z, self.X, self.X_variance)
        else:
            dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1,self.Z,self.X)
            dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.Z, self.X)
            dL_dtheta += self.kern.dKdiag_dtheta(self.dL_dpsi0, self.X)

        return dL_dtheta
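The accumulation above is the chain rule through every kernel matrix the bound touches; schematically (a sketch of what the kern.dK_dtheta / dpsi*_dtheta helpers contract internally),

\[
\frac{\partial L}{\partial \theta}
= \Bigl\langle \frac{\partial L}{\partial K_{mm}}, \frac{\partial K_{mm}}{\partial \theta}\Bigr\rangle
+ \Bigl\langle \frac{\partial L}{\partial \Psi_1}, \frac{\partial \Psi_1}{\partial \theta}\Bigr\rangle
+ \Bigl\langle \frac{\partial L}{\partial \psi_0}, \frac{\partial \psi_0}{\partial \theta}\Bigr\rangle
+ \Bigl\langle \frac{\partial L}{\partial \Psi_2}, \frac{\partial \Psi_2}{\partial \theta}\Bigr\rangle,
\]

where the angle brackets denote element-wise (Frobenius) sums and the Psi_2 term only appears in the uncertain-inputs branch; otherwise its contribution is already folded into the Psi_1 and psi_0 terms.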
@@ -243,17 +254,17 @@ class sparse_GP(GP):
    def _raw_predict(self, Xnew, which_parts='all', full_cov=False):
        """Internal helper function for making predictions, does not account for normalization"""

        Bi,_ = linalg.lapack.flapack.dpotri(self.LB,lower=0) # WTH? this lower switch should be 1, but that doesn't work!
        Bi, _ = linalg.lapack.flapack.dpotri(self.LB, lower=0) # WTH? this lower switch should be 1, but that doesn't work!
        symmetrify(Bi)
        Kmmi_LmiBLmi = backsub_both_sides(self.Lm,np.eye(self.M) - Bi)
        Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.M) - Bi)

        Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
        mu = np.dot(Kx.T, self.Cpsi1V/self.scale_factor)
        mu = np.dot(Kx.T, self.Cpsi1V) # / self.scale_factor)
        if full_cov:
            Kxx = self.kern.K(Xnew,which_parts=which_parts)
            var = Kxx - mdot(Kx.T, Kmmi_LmiBLmi, Kx) #NOTE this won't work for plotting
            Kxx = self.kern.K(Xnew, which_parts=which_parts)
            var = Kxx - mdot(Kx.T, Kmmi_LmiBLmi, Kx) # NOTE this won't work for plotting
        else:
            Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts)
            var = Kxx - np.sum(Kx*np.dot(Kmmi_LmiBLmi, Kx),0)
            Kxx = self.kern.Kdiag(Xnew, which_parts=which_parts)
            var = Kxx - np.sum(Kx * np.dot(Kmmi_LmiBLmi, Kx), 0)

        return mu,var[:,None]
        return mu, var[:, None]
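In matrix form, what _raw_predict returns (with the scale factor now dropped) reads, for test inputs X_*,

\[
\mu_* = K_{*m}\, L_m^{-\top} B^{-1} L_m^{-1}\, \Psi_1 V,
\qquad
\operatorname{var}_* = K_{**} - K_{*m}\bigl(K_{mm}^{-1} - L_m^{-\top} B^{-1} L_m^{-1}\bigr) K_{m*},
\]

since Cpsi1V holds L_m^{-T} B^{-1} L_m^{-1} Psi_1 V and Kmmi_LmiBLmi is backsub_both_sides(Lm, I - Bi) = L_m^{-T}(I - B^{-1})L_m^{-1} = K_mm^{-1} - L_m^{-T} B^{-1} L_m^{-1}.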