mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-07 02:52:40 +02:00)

commit 929cf0a489 (parent ec7334846c)

    [more coverage] and predictive var fixes

4 changed files with 32 additions and 14 deletions
@@ -108,9 +108,15 @@ class GP(Model):

         # The predictive variable to be used to predict using the posterior object's
         # woodbury_vector and woodbury_inv is defined as predictive_variable
+        # as long as the posterior has the right woodbury entries.
+        # It is the input variable used for the covariance between
+        # X_star and the posterior of the GP.
         # This is usually just a link to self.X (full GP) or self.Z (sparse GP).
         # Make sure to name this variable and the predict functions will "just work"
-        # as long as the posterior has the right woodbury entries.
+        # In maths the predictive variable is:
+        # K_{xx} - K_{xp}W_{pp}^{-1}K_{px}
+        # W_{pp} := \texttt{Woodbury inv}
+        # p := _predictive_variable
         self._predictive_variable = self.X

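Note: the expanded comment block above encodes the standard GP predictive identities everything below relies on: mean(X_*) = K_{*p} w and var(X_*) = K_{**} - K_{*p} W_{pp} K_{p*}, where w and W_{pp} are the posterior's woodbury_vector and woodbury_inv and p is the _predictive_variable. A minimal standalone numpy sketch of those two lines, using a hypothetical squared-exponential kernel k and toy data (not GPy's actual kern/posterior objects):

    import numpy as np

    def k(A, B, lengthscale=1.0):
        # hypothetical squared-exponential kernel, for illustration only
        sq = np.sum(A**2, 1)[:, None] + np.sum(B**2, 1)[None, :] - 2*A.dot(B.T)
        return np.exp(-0.5*sq/lengthscale**2)

    X = np.random.randn(20, 1); y = np.sin(X)               # toy inputs/targets
    woodbury_inv = np.linalg.inv(k(X, X) + 0.1*np.eye(20))  # W_pp of an exact GP
    woodbury_vector = woodbury_inv.dot(y)                   # w = W_pp y

    Xstar = np.linspace(-2, 2, 5)[:, None]
    Kx = k(X, Xstar)                      # covariance between _predictive_variable and X_star
    mu = Kx.T.dot(woodbury_vector)        # predictive mean
    var = k(Xstar, Xstar) - Kx.T.dot(woodbury_inv).dot(Kx)  # K_xx - K_xp W_pp K_px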
@@ -213,7 +219,7 @@ class GP(Model):
         Kxx = kern.K(Xnew)
         if self.posterior.woodbury_inv.ndim == 2:
             var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
-        elif self.posterior.woodbury_inv.ndim == 3:
+        elif self.posterior.woodbury_inv.ndim == 3: # Missing data
             var = np.empty((Kxx.shape[0],Kxx.shape[1],self.posterior.woodbury_inv.shape[2]))
             from ..util.linalg import mdot
             for i in range(var.shape[2]):
@@ -223,7 +229,7 @@ class GP(Model):
         Kxx = kern.Kdiag(Xnew)
         if self.posterior.woodbury_inv.ndim == 2:
             var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:,None]
-        elif self.posterior.woodbury_inv.ndim == 3:
+        elif self.posterior.woodbury_inv.ndim == 3: # Missing data
             var = np.empty((Kxx.shape[0],self.posterior.woodbury_inv.shape[2]))
             for i in range(var.shape[1]):
                 var[:, i] = (Kxx - (np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
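Note: the two hunks above tag the 3-dimensional woodbury_inv branch as the missing-data case: with missing observations every output dimension keeps its own Woodbury matrix, so the predictive variance is computed one output slice at a time. A shapes-only toy version of the diagonal branch, where Wi stands in for self.posterior.woodbury_inv and the values are placeholders:

    import numpy as np
    N, Nstar, D = 20, 5, 3
    Kx = np.random.randn(N, Nstar)        # kern.K(X, Xnew)
    Kxx = np.ones(Nstar)                  # kern.Kdiag(Xnew)
    Wi = np.stack([np.eye(N)]*D, axis=2)  # (N, N, D): one woodbury_inv per output
    var = np.empty((Nstar, D))
    for i in range(D):
        var[:, i] = Kxx - np.sum(np.dot(Wi[:, :, i].T, Kx) * Kx, 0)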
@@ -364,11 +370,15 @@ class GP(Model):
             var_jac = dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
             return var_jac

-        if self.posterior.woodbury_inv.ndim == 3:
-            var_jac = []
-            for d in range(self.posterior.woodbury_inv.shape[2]):
-                var_jac.append(compute_cov_inner(self.posterior.woodbury_inv[:, :, d]))
-            var_jac = np.concatenate(var_jac)
+        if self.posterior.woodbury_inv.ndim == 3: # Missing data:
+            if full_cov:
+                var_jac = np.empty((Xnew.shape[0],Xnew.shape[0],Xnew.shape[1],self.output_dim))
+                for d in range(self.posterior.woodbury_inv.shape[2]):
+                    var_jac[:, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
+            else:
+                var_jac = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim))
+                for d in range(self.posterior.woodbury_inv.shape[2]):
+                    var_jac[:, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
         else:
             var_jac = compute_cov_inner(self.posterior.woodbury_inv)
         return mean_jac, var_jac
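Note: this hunk is the substantive Jacobian-variance fix: the old list-append plus np.concatenate path stacked the per-output (N, Q) slices along axis 0, producing (D*N, Q) instead of a trailing output axis, and it ignored full_cov. A small demonstration of the shape bug, with hypothetical sizes N, Q, D:

    import numpy as np
    N, Q, D = 4, 2, 3
    slices = [np.zeros((N, Q)) for _ in range(D)]  # one (N, Q) slice per output d
    print(np.concatenate(slices).shape)            # (12, 2): outputs run into the rows
    var_jac = np.empty((N, Q, D))                  # fixed layout: trailing output axis
    for d in range(D):
        var_jac[:, :, d] = slices[d]
    print(var_jac.shape)                           # (4, 2, 3)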
@@ -391,10 +401,11 @@ class GP(Model):

         mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
         mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
+        Sigma = np.zeros(mumuT.shape)
         if var_jac.ndim == 3:
-            Sigma = np.einsum('iqd,ipd->iqp', var_jac, var_jac)
+            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = var_jac.sum(-1)
         else:
-            Sigma = self.output_dim*np.einsum('iq,ip->iqp', var_jac, var_jac)
+            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = self.output_dim*var_jac
         G = 0.
         if mean:
             G += mumuT
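Note: Sigma changes meaning here: rather than an outer product of the Jacobian variances, it is now zero off-diagonal, with the per-input variances (summed over outputs, or scaled by output_dim when they are shared) written onto the QxQ diagonal of each test point's matrix. The indexing idiom is compact enough to be worth isolating (standalone numpy, placeholder values):

    import numpy as np
    N, Q = 5, 3
    Sigma = np.zeros((N, Q, Q))
    diag_vals = np.ones((N, Q))                    # e.g. var_jac.sum(-1)
    # write diag_vals onto the QxQ diagonal of each of the N matrices
    Sigma[(slice(None),) + np.diag_indices(Q, 2)] = diag_vals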
@@ -412,8 +423,13 @@ class GP(Model):
         """
         G = self.predict_wishard_embedding(Xnew, kern, mean, covariance)
         from ..util.linalg import jitchol
-        return np.array([np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :])))))) for n in range(Xnew.shape[0])])
-        #return np.array([np.sqrt(np.linalg.det(G[n, :, :])) for n in range(Xnew.shape[0])])
+        mag = np.empty(Xnew.shape[0])
+        for n in range(Xnew.shape[0]):
+            try:
+                mag[n] = np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :]))))))
+            except:
+                mag[n] = np.sqrt(np.linalg.det(G[n, :, :]))
+        return mag

     def posterior_samples_f(self,X,size=10, full_cov=True):
         """
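Note: the magnification keeps the numerically stable route, sqrt(det G) = exp(sum(log(diag(L)))) for a Cholesky factor G = L L^T, and now falls back to np.linalg.det only for the points where jitchol fails, rather than aborting the whole batch. A standalone check of the identity, with numpy's cholesky standing in for GPy's jitchol:

    import numpy as np
    A = np.random.randn(4, 4)
    G = A.dot(A.T) + 4*np.eye(4)                    # symmetric positive definite
    L = np.linalg.cholesky(G)                       # jitchol also adds jitter on failure
    via_chol = np.exp(np.sum(np.log(np.diag(L))))   # prod(diag(L)) = sqrt(det G)
    assert np.allclose(via_chol, np.sqrt(np.linalg.det(G)))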
@@ -36,8 +36,10 @@ class GPLVM(GP):
         likelihood = Gaussian()

         super(GPLVM, self).__init__(X, Y, kernel, likelihood, name='GPLVM')

         self.X = Param('latent_mean', X)
         self.link_parameter(self.X, index=0)
+        self._predictive_variable = self.X
+
     def parameters_changed(self):
         super(GPLVM, self).parameters_changed()
@@ -119,7 +119,7 @@ def plot_latent(model, labels=None, which_indices=None,
        Xtest_full[:, [input_1, input_2]] = x
        _, var = model.predict(Xtest_full, **predict_kwargs)
        var = var[:, :1]
-       return np.log(var)
+       return 2*np.sqrt(var)

    #Create an IMshow controller that can re-plot the latent space shading at a good resolution
    if plot_limits is None:
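Note: the latent-space shading switches from log predictive variance to 2*np.sqrt(var), i.e. two predictive standard deviations, which lives on the same scale as the data (the width of the usual 2-sigma band). On a toy variance grid:

    import numpy as np
    var = np.array([[0.04], [0.25], [1.0]])   # stand-in for model.predict(...)[1]
    print(2*np.sqrt(var))                     # [[0.4], [1.0], [2.0]]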
@@ -1 +1 @@
-nosetests . --with-coverage --logging-level=INFO --cover-html --cover-html-dir=coverage --cover-package=GPy --cover-erase
+nosetests . --with-coverage --logging-level=INFO --cover-html --cover-html-dir=coverage --cover-package=GPy --cover-erase --cover-omit=GPy.examples