From dedcf1df5aa0f7eef2016df2f360bbee836603a8 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 17 Jul 2013 14:12:06 +0100
Subject: [PATCH 1/8] plot_ARD greatly improved, plotting of ARD parameters
 for multiple ARD kernels now supported

---
 GPy/kern/kern.py | 50 ++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 44 insertions(+), 6 deletions(-)

diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py
index 5cd90749..4786196e 100644
--- a/GPy/kern/kern.py
+++ b/GPy/kern/kern.py
@@ -66,12 +66,27 @@ class kern(Parameterized):
         Parameterized.setstate(self, state)
 
-    def plot_ARD(self, fignum=None, ax=None, title=None):
-        """If an ARD kernel is present, it bar-plots the ARD parameters"""
+    def plot_ARD(self, fignum=None, ax=None, title='', legend=False):
+        """If an ARD kernel is present, bar-plot the ARD parameters.
+        :param fignum: figure number of the plot
+        :param ax: matplotlib axis to plot on
+        :param title:
+            title of the plot,
+            pass '' to not print a title
+            pass None for a generic title
+        """
         if ax is None:
             fig = pb.figure(fignum)
             ax = fig.add_subplot(111)
 
+        from GPy.util import Tango
+        from matplotlib.textpath import TextPath
+        Tango.reset()
+        xticklabels = []
+        bars = []
+        x0 = 0
+        xes = []
         for p in self.parts:
+            c = Tango.nextMedium()
             if hasattr(p, 'ARD') and p.ARD:
                 if title is None:
                     ax.set_title('ARD parameters, %s kernel' % p.name)
@@ -82,10 +97,32 @@ class kern(Parameterized):
                 else:
                     ard_params = 1. / p.lengthscale
 
-                x = np.arange(len(ard_params))
-                ax.bar(x - 0.4, ard_params)
-                ax.set_xticks(x)
-                ax.set_xticklabels([r"${}$".format(i) for i in x])
+                x = np.arange(x0, x0 + len(ard_params))
+                bars.append(ax.bar(x, ard_params, align='center', color=c, edgecolor='k', linewidth=1.2, label=p.name))
+                xticklabels.extend([r"$\mathrm{{{name}}}\ {x}$".format(name=p.name, x=i) for i in np.arange(len(ard_params))])
+                x0 += len(ard_params)
+        x = np.arange(x0)
+        ax.set_xticks([])
+        for bar in bars:
+            for patch, num in zip(bar.patches, np.arange(len(bar.patches))):
+                height = patch.get_height()
+                xi = patch.get_x() + patch.get_width() / 2.
+                va = 'top'
+                c = 'w'
+                t = TextPath((0, 0), "${xi}$".format(xi=xi), rotation=0, usetex=True, ha='center')
+                if patch.get_extents().height <= t.get_extents().height + 2:
+                    va = 'bottom'
+                    c = 'k'
+                ax.text(xi, height, "${num}$".format(num=int(num)), color=c, rotation=0, ha='center', va=va)
+        # for xi, t in zip(x, xticklabels):
+        #     ax.text(xi, maxi / 2, t, rotation=90, ha='center', va='center')
+        # ax.set_xticklabels(xticklabels, rotation=17)
+        ax.set_xlim(-.5, x0 - .5)
+        if legend and title == '':
+            ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
+                      ncol=len(bars), mode="expand", borderaxespad=0.)
+        elif legend:
+            ax.legend()
         return ax
 
     def _transform_gradients(self, g):
@@ -361,6 +398,7 @@ class kern(Parameterized):
 
         # compute the "cross" terms
         # TODO: better looping, input_slices
+
        for i1, i2 in itertools.permutations(range(len(self.parts)), 2):
             p1, p2 = self.parts[i1], self.parts[i2]
             # ipsl1, ipsl2 = self.input_slices[i1], self.input_slices[i2]
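A usage sketch for the reworked plot_ARD: each ARD part now contributes one colour-coded bar group, so the relevances of several kernels can be compared on a single axis. The data, the kernel composition and the GPRegression model below are illustrative assumptions, not part of the patch:

    import numpy as np
    import GPy

    X = np.random.rand(100, 3)
    Y = np.sin(X[:, :1]) + 0.05 * np.random.randn(100, 1)
    # two ARD parts -> two coloured bar groups in the plot
    k = GPy.kern.rbf(3, ARD=True) + GPy.kern.linear(3, ARD=True) + GPy.kern.white(3)
    m = GPy.models.GPRegression(X, Y, kernel=k)
    m.ensure_default_constraints()
    m.optimize()
    # bar heights are 1/lengthscale for the rbf part and the per-dimension
    # variances for the linear part
    m.kern.plot_ARD(legend=True)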
From 0870301144140b5112a0d7d75d6055436234ea62 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 17 Jul 2013 14:48:04 +0100
Subject: [PATCH 2/8] BGPLVM PCA init now able to cope with all dimension
 issues

---
 GPy/models/gplvm.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py
index 2e81b370..86c0ac28 100644
--- a/GPy/models/gplvm.py
+++ b/GPy/models/gplvm.py
@@ -36,10 +36,10 @@ class GPLVM(GP):
         self.ensure_default_constraints()
 
     def initialise_latent(self, init, input_dim, Y):
+        Xr = np.random.randn(Y.shape[0], input_dim)
         if init == 'PCA':
-            return PCA(Y, input_dim)[0]
-        else:
-            return np.random.randn(Y.shape[0], input_dim)
+            Xr[:, :Y.shape[1]] = PCA(Y, input_dim)[0]
+        return Xr
 
     def getstate(self):
         return GP.getstate(self)

From c280cdac90e0c4518f2049f33f9d8f3432a49ee3 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 17 Jul 2013 14:50:02 +0100
Subject: [PATCH 3/8] added unit tests for sparse GP with uncertain inputs

---
 GPy/testing/unit_tests.py | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py
index 6e504a69..b3029d7f 100644
--- a/GPy/testing/unit_tests.py
+++ b/GPy/testing/unit_tests.py
@@ -23,7 +23,7 @@ class GradientTests(unittest.TestCase):
         self.X2D = np.random.uniform(-3., 3., (40, 2))
         self.Y2D = np.sin(self.X2D[:, 0:1]) * np.sin(self.X2D[:, 1:2]) + np.random.randn(40, 1) * 0.05
 
-    def check_model_with_white(self, kern, model_type='GPRegression', dimension=1):
+    def check_model_with_white(self, kern, model_type='GPRegression', dimension=1, uncertain_inputs=False):
         # Get the correct gradients
         if dimension == 1:
             X = self.X1D
@@ -36,7 +36,10 @@ class GradientTests(unittest.TestCase):
         noise = GPy.kern.white(dimension)
         kern = kern + noise
 
-        m = model_fit(X, Y, kernel=kern)
+        if uncertain_inputs:
+            m = model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1]))
+        else:
+            m = model_fit(X, Y, kernel=kern)
         m.randomize() # constrain all parameters to be positive
         self.assertTrue(m.checkgrad())
@@ -141,6 +144,26 @@ class GradientTests(unittest.TestCase):
         rbf = GPy.kern.rbf(2)
         self.check_model_with_white(rbf, model_type='SparseGPRegression', dimension=2)
 
+    def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
+        ''' Testing the sparse GP regression with rbf, linear and white kernels on 1d data '''
+        rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
+        self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1)
+
+    def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
+        ''' Testing the sparse GP regression with rbf, linear and white kernels on 2d data '''
+        rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
+        self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2)
+
+    def test_SparseGPRegression_rbf_linear_white_kern_2D_uncertain_inputs(self):
+        ''' Testing the sparse GP regression with rbf, linear and white kernels on 2d data with uncertain inputs '''
+        rbflin = GPy.kern.rbf(2) + GPy.kern.linear(2)
+        self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=True)
+
+    def test_SparseGPRegression_rbf_linear_white_kern_1D_uncertain_inputs(self):
+        ''' Testing the sparse GP regression with rbf, linear and white kernels on 1d data with uncertain inputs '''
+        rbflin = GPy.kern.rbf(1) + GPy.kern.linear(1)
+        self.check_model_with_white(rbflin, model_type='SparseGPRegression', dimension=1, uncertain_inputs=True)
+
     def test_GPLVM_rbf_bias_white_kern_2D(self):
         """ Testing GPLVM with rbf + bias and white kernel """
         N, input_dim, D = 50, 1, 2
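What the new uncertain-inputs tests boil down to, pulled out of check_model_with_white as a standalone sketch (shapes follow the 2D fixture; the exact SparseGPRegression keyword set is taken from the test code above):

    import numpy as np
    import GPy

    X = np.random.uniform(-3., 3., (40, 2))
    Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + 0.05 * np.random.randn(40, 1)
    kern = GPy.kern.rbf(2) + GPy.kern.linear(2) + GPy.kern.white(2)
    # per-point input variances switch the sparse GP to uncertain inputs
    m = GPy.models.SparseGPRegression(X, Y, kernel=kern,
                                      X_variance=np.random.rand(*X.shape))
    m.randomize()
    # analytic gradients of the variational bound vs. finite differences
    assert m.checkgrad()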
From a8ae457e6b4addc1a187787d25ae3646c0d0f710 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 17 Jul 2013 15:00:49 +0100
Subject: [PATCH 4/8] BGPLVM example testing with rbf_inv

---
 GPy/examples/dimensionality_reduction.py | 46 ++++++++++++------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index ac39fd66..ef6e37fb 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -7,26 +7,27 @@ from matplotlib import pyplot as plt, cm
 import GPy
 from GPy.core.transformations import logexp
 from GPy.models.bayesian_gplvm import BayesianGPLVM
+from GPy.likelihoods.gaussian import Gaussian
 
 default_seed = np.random.seed(123344)
 
 def BGPLVM(seed=default_seed):
     N = 10
     num_inducing = 3
-    Q = 2
-    D = 4
+    Q = 5
+    D = 10
     # generate GPLVM-like data
     X = np.random.rand(N, Q)
-    k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+    k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.01)
     K = k.K(X)
     Y = np.random.multivariate_normal(np.zeros(N), K, Q).T
+    lik = Gaussian(Y, normalize=True)
 
-    k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q)
-    # k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q)
+    k = GPy.kern.rbf_inv(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q)
     # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
     # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
-    m = GPy.models.BayesianGPLVM(Y, Q, kernel=k, num_inducing=num_inducing)
+    m = GPy.models.BayesianGPLVM(lik, Q, kernel=k, num_inducing=num_inducing)
 
     # m.constrain_positive('(rbf|bias|noise|white|S)')
     # m.constrain_fixed('S', 1)
@@ -70,16 +71,16 @@ def sparseGPLVM_oil(optimize=True, N=100, Q=6, num_inducing=15, max_iters=50):
 
     # create simple GP model
     kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q)
-    m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing = num_inducing)
+    m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing)
     m.data_labels = data['Y'].argmax(axis=1)
 
     # optimize
     if optimize:
-        m.optimize('scg', messages=1, max_iters = max_iters)
+        m.optimize('scg', messages=1, max_iters=max_iters)
 
     # plot
     print(m)
-    #m.plot_latent(labels=m.data_labels)
+    # m.plot_latent(labels=m.data_labels)
     return m
 
 def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False):
@@ -136,12 +137,12 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
         m.optimize('scg', messages=1)
     return m
 
-def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=50, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=150, plot=False, **k):
     np.random.seed(0)
     data = GPy.util.datasets.oil()
 
     # create simple GP model
-    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
+    kernel = GPy.kern.rbf_inv(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
     Y = data['X'][:N]
     Yn = Y - Y.mean(0)
     Yn /= Yn.std(0)
@@ -150,14 +151,14 @@ def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=150, plot=F
     m.data_labels = data['Y'][:N].argmax(axis=1)
 
     # m.constrain('variance|leng', logexp_clipped())
-    m['.*lengt'] = 1.  # m.X.var(0).max() / m.X.var(0)
+    m['.*lengt'] = m.X.var(0).max() / m.X.var(0)
 
     m['noise'] = Yn.var() / 100.
 
     # optimize
     if optimize:
         m.constrain_fixed('noise')
-        m.optimize('scg', messages=1, max_iters=100, gtol=.05)
+        m.optimize('scg', messages=1, max_iters=200, gtol=.05)
         m.constrain_positive('noise')
         m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)
@@ -208,7 +209,7 @@ def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False):
 
     Y1 += .3 * np.random.randn(*Y1.shape)
     Y2 += .2 * np.random.randn(*Y2.shape)
-    Y3 += .1 * np.random.randn(*Y3.shape)
+    Y3 += .25 * np.random.randn(*Y3.shape)
 
     Y1 -= Y1.mean(0)
     Y2 -= Y2.mean(0)
@@ -263,16 +264,16 @@ def bgplvm_simulation_matlab_compare():
 
 def bgplvm_simulation(optimize='scg',
                       plot=True,
-                      max_iters=2e4):
+                      max_iters=2e4,
+                      plot_sim=False):
     # from GPy.core.transformations import logexp_clipped
-    D1, D2, D3, N, num_inducing, Q = 15, 8, 8, 100, 3, 5
-    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot)
+    D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 300, 23, 6
+    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
 
     from GPy.models import mrd
     from GPy import kern
     reload(mrd); reload(kern)
 
-    Y = Ylist[0]
     k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))  # + kern.bias(Q)
@@ -280,7 +281,6 @@ def bgplvm_simulation(optimize='scg',
 
     # m.constrain('variance|noise', logexp_clipped())
-    m['noise'] = Y.var() / 100.
-    m['linear_variance'] = .01
+    m['noise'] = Ylist[0].var() / 100.
 
     if optimize:
         print "Optimizing model:"
@@ -346,7 +346,7 @@ def brendan_faces():
 def stick_play(range=None, frame_rate=15):
     data = GPy.util.datasets.stick()
     # optimize
-    if range==None:
+    if range is None:
         Y = data['Y'].copy()
     else:
         Y = data['Y'][range[0]:range[1], :].copy()
@@ -374,10 +374,10 @@ def stick_bgplvm(model=None):
     data = GPy.util.datasets.stick()
     Q = 6
     kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
-    m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20,kernel=kernel)
+    m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
     # optimize
     m.ensure_default_constraints()
-    m.optimize(messages=1, max_f_eval=3000,xtol=1e-300,ftol=1e-300)
+    m.optimize(messages=1, max_f_eval=3000, xtol=1e-300, ftol=1e-300)
     m._set_params(m._get_params())
     fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
     plt.sca(latent_axes)
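The new lengthscale initialisation in BGPLVM_oil, m['.*lengt'] = m.X.var(0).max() / m.X.var(0), scales each ARD parameter by how much variance the corresponding latent dimension carries after the PCA init. A toy numeric illustration of the heuristic (values invented; X stands in for m.X):

    import numpy as np

    # latent init whose dimensions carry very different amounts of variance
    X = np.random.randn(200, 4) * np.array([2., 1., .5, .1])
    v = X.var(0)
    init = v.max() / v  # roughly [1, 4, 16, 400]
    # the dominant dimension gets ~1; near-empty dimensions get large values,
    # effectively switching them off when interpreted as lengthscales
    print(init)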
From c44bda766c15fca87e623abef512053cb4a0701e Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 17 Jul 2013 15:01:00 +0100
Subject: [PATCH 5/8] minor PEP 8 cleanup

---
 GPy/core/gp.py        | 3 ++-
 GPy/core/sparse_gp.py | 6 +++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 847cd99d..ce2b59de 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -96,7 +96,8 @@ class GP(GPBase):
         model for a new variable Y* = v_tilde/tau_tilde, with a covariance
         matrix K* = K + diag(1./tau_tilde) plus a normalization term.
         """
-        return - 0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
+        return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -
+                0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)
 
     def _log_likelihood_gradients(self):

diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 93ba5d7d..a6478850 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -108,7 +108,7 @@ class SparseGP(GPBase):
         self.B = np.eye(self.num_inducing) + self.A
         self.LB = jitchol(self.B)
 
-        #VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
+        # VVT_factor is a matrix such that tdot(VVT_factor) = VVT... this is for efficiency!
         self.psi1Vf = np.dot(self.psi1.T, self.likelihood.VVT_factor)
 
         # back substitute C into psi1Vf
@@ -163,7 +163,7 @@ class SparseGP(GPBase):
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         if self.likelihood.is_heteroscedastic:
-            A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V*self.likelihood.Y)
+            A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
             B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
         else:
             A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
@@ -246,7 +246,7 @@ class SparseGP(GPBase):
         Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.num_inducing) - Bi)
 
         if self.Cpsi1V is None:
-            psi1V = np.dot(self.psi1.T,self.likelihood.V)
+            psi1V = np.dot(self.psi1.T, self.likelihood.V)
             tmp, _ = dtrtrs(self.Lm, np.asfortranarray(psi1V), lower=1, trans=0)
             tmp, _ = dpotrs(self.LB, tmp, lower=1)
             self.Cpsi1V, _ = dtrtrs(self.Lm, tmp, lower=1, trans=1)
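For reference, the newly wrapped return in gp.py is the standard log marginal likelihood of a GP with output_dim independent output columns sharing one covariance K. A self-contained numpy restatement (the function name is mine, and treating _model_fit_term() as the quadratic term -1/2 tr(K^{-1} Y Y^T) is an assumption based on the surrounding code):

    import numpy as np

    def gp_log_marginal(K, Y, Z=0.0):
        """-N*D/2 log(2 pi) - D/2 log|K| - 1/2 tr(K^{-1} Y Y^T) + Z."""
        N, D = Y.shape
        L = np.linalg.cholesky(K)
        K_logdet = 2. * np.sum(np.log(np.diag(L)))
        alpha = np.linalg.solve(K, Y)          # K^{-1} Y
        model_fit = -0.5 * np.sum(alpha * Y)   # -1/2 tr(K^{-1} Y Y^T)
        return -0.5 * N * D * np.log(2. * np.pi) - 0.5 * D * K_logdet + model_fit + Z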
""" - return - 0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z + return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - + 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z) def _log_likelihood_gradients(self): diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 93ba5d7d..a6478850 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -108,7 +108,7 @@ class SparseGP(GPBase): self.B = np.eye(self.num_inducing) + self.A self.LB = jitchol(self.B) - #VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency! + # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency! self.psi1Vf = np.dot(self.psi1.T, self.likelihood.VVT_factor) # back substutue C into psi1Vf @@ -163,7 +163,7 @@ class SparseGP(GPBase): def log_likelihood(self): """ Compute the (lower bound on the) log marginal likelihood """ if self.likelihood.is_heteroscedastic: - A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V*self.likelihood.Y) + A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y) B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A)) else: A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT @@ -246,7 +246,7 @@ class SparseGP(GPBase): Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.num_inducing) - Bi) if self.Cpsi1V is None: - psi1V = np.dot(self.psi1.T,self.likelihood.V) + psi1V = np.dot(self.psi1.T, self.likelihood.V) tmp, _ = dtrtrs(self.Lm, np.asfortranarray(psi1V), lower=1, trans=0) tmp, _ = dpotrs(self.LB, tmp, lower=1) self.Cpsi1V, _ = dtrtrs(self.Lm, tmp, lower=1, trans=1) From 03dd1ba895844d512d56c97a9f5128f73d074c3b Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 17 Jul 2013 15:22:17 +0100 Subject: [PATCH 6/8] plot_ARD greatly improved, plotting of ARD paramters for multiple ARD kernels now supported --- GPy/kern/kern.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py index 4786196e..3f78e5e5 100644 --- a/GPy/kern/kern.py +++ b/GPy/kern/kern.py @@ -84,7 +84,6 @@ class kern(Parameterized): xticklabels = [] bars = [] x0 = 0 - xes = [] for p in self.parts: c = Tango.nextMedium() if hasattr(p, 'ARD') and p.ARD: @@ -102,7 +101,6 @@ class kern(Parameterized): xticklabels.extend([r"$\mathrm{{{name}}}\ {x}$".format(name=p.name, x=i) for i in np.arange(len(ard_params))]) x0 += len(ard_params) x = np.arange(x0) - ax.set_xticks([]) for bar in bars: for patch, num in zip(bar.patches, np.arange(len(bar.patches))): height = patch.get_height() @@ -117,6 +115,7 @@ class kern(Parameterized): # for xi, t in zip(x, xticklabels): # ax.text(xi, maxi / 2, t, rotation=90, ha='center', va='center') # ax.set_xticklabels(xticklabels, rotation=17) + ax.set_xticks([]) ax.set_xlim(-.5, x0 - .5) if title is '': ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, From c42cdba0e6bf84c9bb7ef8a509bc52de6417503a Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 17 Jul 2013 15:22:27 +0100 Subject: [PATCH 7/8] BGPLVM example testing with rbf_inv --- GPy/examples/dimensionality_reduction.py | 15 
From 09ee9e5fab8c7819def1a6ebabdefaac7a69ed03 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 17 Jul 2013 15:43:46 +0100
Subject: [PATCH 8/8] Bayesian GPLVM plots inducing inputs automatically

---
 GPy/models/bayesian_gplvm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py
index 68b0a133..ac6c7c69 100644
--- a/GPy/models/bayesian_gplvm.py
+++ b/GPy/models/bayesian_gplvm.py
@@ -115,8 +115,8 @@ class BayesianGPLVM(SparseGP, GPLVM):
         self.dbound_dZtheta = SparseGP._log_likelihood_gradients(self)
         return np.hstack((self.dbound_dmuS.flatten(), self.dbound_dZtheta))
 
-    def plot_latent(self, *args, **kwargs):
-        return plot_latent.plot_latent(self, *args, **kwargs)
+    def plot_latent(self, plot_inducing=True, *args, **kwargs):
+        return plot_latent.plot_latent(self, plot_inducing=plot_inducing, *args, **kwargs)
 
     def do_test_latents(self, Y):
         """
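With this last patch, plot_latent marks the inducing inputs by default. A usage sketch on a trained BayesianGPLVM instance m (not from the patch; labels assumes data_labels has been set, as in BGPLVM_oil above):

    # inducing inputs are now drawn automatically
    m.plot_latent(labels=m.data_labels)
    # previous behaviour, without inducing inputs
    m.plot_latent(plot_inducing=False)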