diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index ad082b3c..9a199faa 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -108,9 +108,15 @@ class GP(Model):
         # The predictive variable to be used to predict using the posterior object's
         # woodbury_vector and woodbury_inv is defined as predictive_variable
+        # as long as the posterior has the right woodbury entries.
+        # It is the input variable used for the covariance between
+        # X_star and the posterior of the GP.
         # This is usually just a link to self.X (full GP) or self.Z (sparse GP).
         # Make sure to name this variable and the predict functions will "just work"
-        # as long as the posterior has the right woodbury entries.
+        # In maths the predictive variable is:
+        # K_{xx} - K_{xp}W_{pp}^{-1}K_{px}
+        # W_{pp} := \texttt{Woodbury inv}
+        # p := _predictive_variable
         self._predictive_variable = self.X
@@ -213,7 +219,7 @@ class GP(Model):
             Kxx = kern.K(Xnew)
             if self.posterior.woodbury_inv.ndim == 2:
                 var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
-            elif self.posterior.woodbury_inv.ndim == 3:
+            elif self.posterior.woodbury_inv.ndim == 3: # Missing data
                 var = np.empty((Kxx.shape[0],Kxx.shape[1],self.posterior.woodbury_inv.shape[2]))
                 from ..util.linalg import mdot
                 for i in range(var.shape[2]):
@@ -223,7 +229,7 @@ class GP(Model):
             Kxx = kern.Kdiag(Xnew)
             if self.posterior.woodbury_inv.ndim == 2:
                 var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:,None]
-            elif self.posterior.woodbury_inv.ndim == 3:
+            elif self.posterior.woodbury_inv.ndim == 3: # Missing data
                 var = np.empty((Kxx.shape[0],self.posterior.woodbury_inv.shape[2]))
                 for i in range(var.shape[1]):
                     var[:, i] = (Kxx - (np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
@@ -364,11 +370,15 @@ class GP(Model):
                 var_jac = dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
                 return var_jac
-        if self.posterior.woodbury_inv.ndim == 3:
-            var_jac = []
-            for d in range(self.posterior.woodbury_inv.shape[2]):
-                var_jac.append(compute_cov_inner(self.posterior.woodbury_inv[:, :, d]))
-            var_jac = np.concatenate(var_jac)
+        if self.posterior.woodbury_inv.ndim == 3: # Missing data:
+            if full_cov:
+                var_jac = np.empty((Xnew.shape[0],Xnew.shape[0],Xnew.shape[1],self.output_dim))
+                for d in range(self.posterior.woodbury_inv.shape[2]):
+                    var_jac[:, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
+            else:
+                var_jac = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim))
+                for d in range(self.posterior.woodbury_inv.shape[2]):
+                    var_jac[:, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
         else:
             var_jac = compute_cov_inner(self.posterior.woodbury_inv)
         return mean_jac, var_jac
@@ -391,10 +401,11 @@ class GP(Model):
         mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
         mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
+        Sigma = np.zeros(mumuT.shape)
         if var_jac.ndim == 3:
-            Sigma = np.einsum('iqd,ipd->iqp', var_jac, var_jac)
+            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = var_jac.sum(-1)
         else:
-            Sigma = self.output_dim*np.einsum('iq,ip->iqp', var_jac, var_jac)
+            Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = self.output_dim*var_jac
         G = 0.
         if mean:
             G += mumuT
@@ -412,8 +423,13 @@ class GP(Model):
         """
         G = self.predict_wishard_embedding(Xnew, kern, mean, covariance)
         from ..util.linalg import jitchol
-        return np.array([np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :])))))) for n in range(Xnew.shape[0])])
-        #return np.array([np.sqrt(np.linalg.det(G[n, :, :])) for n in range(Xnew.shape[0])])
+        mag = np.empty(Xnew.shape[0])
+        for n in range(Xnew.shape[0]):
+            try:
+                mag[n] = np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :]))))))
+            except:
+                mag[n] = np.sqrt(np.linalg.det(G[n, :, :]))
+        return mag

     def posterior_samples_f(self,X,size=10, full_cov=True):
         """
diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py
index d4f4f564..17d42e5a 100644
--- a/GPy/models/gplvm.py
+++ b/GPy/models/gplvm.py
@@ -36,8 +36,10 @@ class GPLVM(GP):
            likelihood = Gaussian()

        super(GPLVM, self).__init__(X, Y, kernel, likelihood, name='GPLVM')
+        self.X = Param('latent_mean', X)
        self.link_parameter(self.X, index=0)
+        self._predictive_variable = self.X

    def parameters_changed(self):
        super(GPLVM, self).parameters_changed()
diff --git a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py
index a36f168d..ff0887b8 100644
--- a/GPy/plotting/matplot_dep/dim_reduction_plots.py
+++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py
@@ -119,7 +119,7 @@ def plot_latent(model, labels=None, which_indices=None,
         Xtest_full[:, [input_1, input_2]] = x
         _, var = model.predict(Xtest_full, **predict_kwargs)
         var = var[:, :1]
-        return np.log(var)
+        return 2*np.sqrt(var)

     #Create an IMshow controller that can re-plot the latent space shading at a good resolution
     if plot_limits is None:
diff --git a/GPy/testing/run_coverage.sh b/GPy/testing/run_coverage.sh
index f2e52230..f9b33015 100755
--- a/GPy/testing/run_coverage.sh
+++ b/GPy/testing/run_coverage.sh
@@ -1 +1 @@
-nosetests . --with-coverage --logging-level=INFO --cover-html --cover-html-dir=coverage --cover-package=GPy --cover-erase
+nosetests . --with-coverage --logging-level=INFO --cover-html --cover-html-dir=coverage --cover-package=GPy --cover-erase --cover-omit=GPy.examples
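
Note: the comment added in the first gp.py hunk describes the predictive covariance that the prediction code computes from the posterior's Woodbury inverse. The following standalone NumPy sketch illustrates that formula with toy inputs; the names (rbf, X_pred, X_star, the noise value 0.1) are illustrative stand-ins and are not part of this patch.

    import numpy as np

    rng = np.random.RandomState(0)
    X_pred = rng.randn(5, 2)   # stands in for the model's _predictive_variable (self.X or self.Z)
    X_star = rng.randn(3, 2)   # test inputs Xnew

    def rbf(A, B, lengthscale=1.0):
        # Toy RBF kernel, standing in for kern.K(A, B).
        sqdist = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
        return np.exp(-0.5 * sqdist / lengthscale ** 2)

    K_pp = rbf(X_pred, X_pred)
    K_px = rbf(X_pred, X_star)   # K_{px}
    K_xx = rbf(X_star, X_star)   # K_{xx}

    # Stand-in for posterior.woodbury_inv; for a full GP with Gaussian noise
    # this is (K_pp + sigma^2 I)^{-1}.
    woodbury_inv = np.linalg.inv(K_pp + 0.1 * np.eye(K_pp.shape[0]))

    # Predictive covariance, matching the patched prediction code:
    # var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
    var = K_xx - K_px.T.dot(woodbury_inv).dot(K_px)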
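The rewritten magnification loop in the last gp.py hunk computes sqrt(det(G)) from the log-determinant of a Cholesky factor and falls back to np.linalg.det when the factorisation fails. A minimal sketch of that identity, using np.linalg.cholesky in place of GPy's jitchol (an assumption made purely for self-containment):

    import numpy as np

    def magnification(G):
        # sqrt(det(G)) for a symmetric positive (semi-)definite matrix G.
        try:
            # log det(G) = 2 * sum(log(diag(L))) where G = L L^T
            L = np.linalg.cholesky(G)  # the patch uses jitchol, which adds jitter on failure
            return np.sqrt(np.exp(2 * np.sum(np.log(np.diag(L)))))
        except np.linalg.LinAlgError:
            # Mirrors the patch's fallback branch.
            return np.sqrt(np.linalg.det(G))

    A = np.array([[2.0, 0.5], [0.5, 1.0]])
    print(magnification(A), np.sqrt(np.linalg.det(A)))  # the two values agree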