diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py
index 133895ff..3c3d59e6 100644
--- a/GPy/kern/rbf.py
+++ b/GPy/kern/rbf.py
@@ -55,7 +55,6 @@ class rbf(kernpart):
         self._X, self._X2, self._params = np.empty(shape=(3,1))
 
     def _get_params(self):
-        foo
         return np.hstack((self.variance,self.lengthscale))
 
     def _set_params(self,x):
diff --git a/GPy/models/Bayesian_GPLVM.py b/GPy/models/Bayesian_GPLVM.py
index 0eb957a9..430c2718 100644
--- a/GPy/models/Bayesian_GPLVM.py
+++ b/GPy/models/Bayesian_GPLVM.py
@@ -83,3 +83,8 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
 
     def _log_likelihood_gradients(self):
         return np.hstack((self.dL_dmuS().flatten(), sparse_GP._log_likelihood_gradients(self)))
+
+    def plot_latent(self, *args, **kwargs):
+        # draw the latent space via the parent class, then overlay the inducing inputs Z as white triangles
+        input_1, input_2 = GPLVM.plot_latent(self, *args, **kwargs)
+        pb.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')
diff --git a/GPy/models/GPLVM.py b/GPy/models/GPLVM.py
index d0dc766f..b44801fc 100644
--- a/GPy/models/GPLVM.py
+++ b/GPy/models/GPLVM.py
@@ -117,6 +117,4 @@ class GPLVM(GP):
 
         pb.xlim(xmin[0],xmax[0])
         pb.ylim(xmin[1],xmax[1])
-
-
-
+        return input_1, input_2
diff --git a/GPy/models/sparse_GPLVM.py b/GPy/models/sparse_GPLVM.py
index 542fbe0e..591c49b2 100644
--- a/GPy/models/sparse_GPLVM.py
+++ b/GPy/models/sparse_GPLVM.py
@@ -55,3 +55,8 @@ class sparse_GPLVM(sparse_GP_regression, GPLVM):
         # passing Z without a small amount of jitter will induce the white kernel where we don't want it!
         mu, var, upper, lower = sparse_GP_regression.predict(self, self.Z+np.random.randn(*self.Z.shape)*0.0001)
         pb.plot(mu[:, 0] , mu[:, 1], 'ko')
+
+    def plot_latent(self, *args, **kwargs):
+        # draw the latent space via the parent class, then overlay the inducing inputs Z as white triangles
+        input_1, input_2 = GPLVM.plot_latent(self, *args, **kwargs)
+        pb.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')
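Note: the two plot_latent overrides above share one pattern: GPLVM.plot_latent now returns the pair of latent dimensions it drew, and each sparse subclass reuses that plot before overlaying its inducing inputs Z as white triangles. A minimal, self-contained sketch of the pattern follows; the class names and random data are illustrative stand-ins, not GPy's actual API.

# Sketch of the override pattern in the patch; LatentModel/SparseLatentModel
# are hypothetical stand-ins for GPLVM/sparse_GPLVM.
import numpy as np
import pylab as pb

class LatentModel(object):
    """Plots the latent points and reports which dimensions were drawn."""
    def __init__(self, X):
        self.X = X

    def plot_latent(self, input_1=0, input_2=1):
        pb.plot(self.X[:, input_1], self.X[:, input_2], 'bx')
        # returning the plotted dimensions lets subclasses overlay extras
        return input_1, input_2

class SparseLatentModel(LatentModel):
    """Adds inducing inputs Z to the parent's latent-space plot."""
    def __init__(self, X, Z):
        LatentModel.__init__(self, X)
        self.Z = Z

    def plot_latent(self, *args, **kwargs):
        # explicit self is needed in the unbound parent call, as in the patch
        input_1, input_2 = LatentModel.plot_latent(self, *args, **kwargs)
        pb.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')  # white triangles

m = SparseLatentModel(X=np.random.randn(50, 2), Z=np.random.randn(10, 2))
m.plot_latent()
pb.show()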