From fad0e07624971b0e381db34806b1b27ae7d27fcb Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Mon, 28 Jan 2013 00:16:23 +0000
Subject: [PATCH] Sparse EP

---
 GPy/examples/ep_fix.py        |   5 +-
 GPy/examples/poisson.py       |  50 ++++++
 GPy/examples/sparse_ep_fix.py |  76 ++++++++++
 GPy/inference/EP.py           |   8 +-
 GPy/models/GP.py              |   8 +-
 GPy/models/__init__.py        |   1 +
 GPy/models/sparse_GP.py       | 258 ++++++++++++++++++++++++++++++++++
 7 files changed, 398 insertions(+), 8 deletions(-)
 create mode 100644 GPy/examples/poisson.py
 create mode 100644 GPy/examples/sparse_ep_fix.py
 create mode 100644 GPy/models/sparse_GP.py

diff --git a/GPy/examples/ep_fix.py b/GPy/examples/ep_fix.py
index 2da94335..9b35b3ff 100644
--- a/GPy/examples/ep_fix.py
+++ b/GPy/examples/ep_fix.py
@@ -10,6 +10,7 @@
 import numpy as np
 import GPy
 pb.ion()
+pb.close('all')
 
 default_seed=10000
 model_type='Full'
@@ -26,11 +27,13 @@ data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
 likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])
 
 m = GPy.models.GP(data['X'],likelihood=likelihood)
+#m = GPy.models.GP(data['X'],Y=likelihood.Y)
 
 m.constrain_positive('var')
 m.constrain_positive('len')
 m.tie_param('lengthscale')
-m.approximate_likelihood()
+if not isinstance(m.likelihood,GPy.inference.likelihoods.gaussian):
+    m.approximate_likelihood()
 print m.checkgrad()
 # Optimize and plot
 m.optimize()
diff --git a/GPy/examples/poisson.py b/GPy/examples/poisson.py
new file mode 100644
index 00000000..5a1cc6af
--- /dev/null
+++ b/GPy/examples/poisson.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+
+"""
+Simple Gaussian process regression with a Poisson likelihood (EP approximation)
+"""
+import pylab as pb
+import numpy as np
+import GPy
+pb.ion()
+
+pb.close('all')
+default_seed=10000
+
+model_type='Full'
+inducing=4
+seed=default_seed
+"""Simple 1D Poisson regression example.
+:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
+:param seed: seed value for data generation (default is 10000).
+:type seed: int
+:param inducing: number of inducing variables (only used for 'FITC' or 'DTC').
+:type inducing: int
+"""
+
+X = np.arange(0,100,5)[:,None]
+F = np.round(np.sin(X/18.) + .1*X)
+E = np.random.randint(-3,3,20)[:,None]
+Y = np.clip(F + E,0,np.inf) # counts must be non-negative for the Poisson likelihood
+pb.plot(X,F,'k-')
+pb.plot(X,Y,'ro')
+pb.figure()
+likelihood = GPy.inference.likelihoods.poisson(Y,scale=4.)
+
+m = GPy.models.GP(X,likelihood=likelihood)
+#m = GPy.models.GP(X,Y=likelihood.Y)
+
+m.constrain_positive('var')
+m.constrain_positive('len')
+m.tie_param('lengthscale')
+if not isinstance(m.likelihood,GPy.inference.likelihoods.gaussian):
+    m.approximate_likelihood()
+print m.checkgrad()
+# Optimize and plot
+m.optimize()
+#m.em(plot_all=False) # EM algorithm
+m.plot()
+
+print(m)
diff --git a/GPy/examples/sparse_ep_fix.py b/GPy/examples/sparse_ep_fix.py
new file mode 100644
index 00000000..738a82e6
--- /dev/null
+++ b/GPy/examples/sparse_ep_fix.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+
+"""
+Sparse GP classification: EP with a probit likelihood and an RBF kernel
+"""
+
+import numpy as np
+import pylab as pb
+import GPy
+np.random.seed(2)
+pb.ion()
+N = 500
+M = 5
+
+######################################
+## 1 dimensional example
+
+# sample inputs and outputs
+X = np.random.uniform(-3.,3.,(N,1))
+#Y = np.sin(X)+np.random.randn(N,1)*0.05
+F = np.sin(X)+np.random.randn(N,1)*0.05
+Y = np.ones([F.shape[0],1])
+Y[F<0] = -1
+likelihood = GPy.inference.likelihoods.probit(Y)
+
+# construct kernel
+rbf = GPy.kern.rbf(1)
+noise = GPy.kern.white(1)
+kernel = rbf + noise
+
+# create simple GP model
+#m1 = GPy.models.sparse_GP_regression(X, Y, kernel, M=M)
+m1 = GPy.models.sparse_GP(X, kernel=kernel, M=M, likelihood=likelihood)
+
+# constrain all parameters to be positive
+m1.constrain_positive('(variance|lengthscale|precision)')
+#m1.constrain_positive('(variance|lengthscale)')
+#m1.constrain_fixed('prec',10.)
+
+
+#check gradient FIXME unit test please
+m1.checkgrad()
+# optimize and plot
+m1.optimize('tnc', messages = 1)
+m1.plot()
+# print(m1)
+
+######################################
+## 2 dimensional example
+
+# # sample inputs and outputs
+# X = np.random.uniform(-3.,3.,(N,2))
+# Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(N,1)*0.05
+
+# # construct kernel
+# rbf = GPy.kern.rbf(2)
+# noise = GPy.kern.white(2)
+# kernel = rbf + noise
+
+# # create simple GP model
+# m2 = GPy.models.sparse_GP_regression(X,Y,kernel, M = 50)
+
+
+# # constrain all parameters to be positive (but not inducing inputs)
+# m2.constrain_positive('(variance|lengthscale|precision)')
+
+# #check gradient FIXME unit test please
+# m2.checkgrad()
+
+# # optimize and plot
+# pb.figure()
+# m2.optimize('tnc', messages = 1)
+# m2.plot()
+# print(m2)
diff --git a/GPy/inference/EP.py b/GPy/inference/EP.py
index f7c163b1..751d5ca8 100644
--- a/GPy/inference/EP.py
+++ b/GPy/inference/EP.py
@@ -9,7 +9,7 @@
 from ..util.plot import gpplot
 from .. import kern
 
 class EP:
-    def __init__(self,covariance,likelihood,Kmn=None,Knn_diag=None,epsilon=1e-3,powerep=[1.,1.]):
+    def __init__(self,covariance,likelihood,Kmn=None,Knn_diag=None,epsilon=1e-3,power_ep=[1.,1.]):
         """
         Expectation Propagation
 
@@ -19,7 +19,7 @@ class EP:
         likelihood : Output's likelihood (likelihood class)
         kernel : a GPy kernel (kern class)
         inducing : Either an array specifying the inducing points location or a sacalar defining their number. None value for using a non-sparse model is used.
-        powerep : Power-EP parameters (eta,delta) - 2x1 numpy array (floats)
+        power_ep : Power-EP parameters (eta,delta) - 2x1 numpy array (floats)
         epsilon : Convergence criterion, maximum squared difference allowed between mean updates to stop iterations (float)
         """
         self.likelihood = likelihood
@@ -38,7 +38,7 @@
             assert len(Knn_diag) == self.N, 'Knn_diagonal has size different from N'
 
         self.epsilon = epsilon
-        self.eta, self.delta = powerep
+        self.eta, self.delta = power_ep
         self.jitter = 1e-12
 
         """
@@ -206,6 +206,7 @@
             epsilon_np2 = sum((self.v_tilde-self.np2[-1])**2)/self.N
             self.np1.append(self.tau_tilde.copy())
             self.np2.append(self.v_tilde.copy())
+        return self.tau_tilde[:,None], self.v_tilde[:,None], self.Z_hat[:,None], self.tau_[:,None], self.v_[:,None]
 
 class FITC(EP):
     def fit_EP(self):
@@ -306,3 +307,4 @@
             epsilon_np2 = sum((self.v_tilde-self.np2[-1])**2)/self.N
             self.np1.append(self.tau_tilde.copy())
             self.np2.append(self.v_tilde.copy())
+        return self.tau_tilde[:,None], self.v_tilde[:,None], self.Z_hat[:,None], self.tau_[:,None], self.v_[:,None]
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
index 4a8d23e9..ccfe95c7 100644
--- a/GPy/models/GP.py
+++ b/GPy/models/GP.py
@@ -29,8 +29,8 @@ class GP(model):
 
     """
 
-    def __init__(self,X,Y=None,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None,likelihood=None,epsilon_ep=1e-3,epsion_em=.1,powerep=[1.,1.]):
-        #TODO: specify beta parameter explicitely
+    def __init__(self,X,Y=None,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None,likelihood=None,epsilon_ep=1e-3,epsion_em=.1,power_ep=[1.,1.]):
+        #TODO: make beta parameter explicit
 
         # parse arguments
         self.Xslices = Xslices
@@ -87,7 +87,7 @@ class GP(model):
         else:
             # Y is defined after approximating the likelihood
             self.EP = True
-            self.eta,self.delta = powerep
+            self.eta,self.delta = power_ep
             self.epsilon_ep = epsilon_ep
             self.tau_tilde = np.ones([self.N,self.D])
             self.v_tilde = np.zeros([self.N,self.D])
@@ -116,7 +116,7 @@ class GP(model):
 
     def approximate_likelihood(self):
         assert not isinstance(self.likelihood, gaussian), "EP is only available for non-gaussian likelihoods"
-        self.ep_approx = Full(self.K,self.likelihood,epsilon=self.epsilon_ep,powerep=[self.eta,self.delta])
+        self.ep_approx = Full(self.K,self.likelihood,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
         self.tau_tilde, self.v_tilde, self.Z_hat, self.tau_, self.v_=self.ep_approx.fit_EP()
         # Y: EP likelihood is defined as a regression model for mu_tilde
         self.Y = self.v_tilde/self.tau_tilde
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index cc2f62d6..a839f827 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -10,3 +10,4 @@ from generalized_FITC import generalized_FITC
 from sparse_GPLVM import sparse_GPLVM
 from uncollapsed_sparse_GP import uncollapsed_sparse_GP
 from GP import GP
+from sparse_GP import sparse_GP
diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py
new file mode 100644
index 00000000..1164a1af
--- /dev/null
+++ b/GPy/models/sparse_GP.py
@@ -0,0 +1,258 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+import numpy as np
+import pylab as pb
+from ..util.linalg import mdot, jitchol, chol_inv, pdinv
+from ..util.plot import gpplot
+from .. import kern
+from GP import GP
+from ..inference.EP import Full, DTC, FITC
+from ..inference.likelihoods import likelihood,probit,poisson,gaussian
+
+#Still TODO:
+# make use of slices properly (kernel can now do this)
+# enable heteroscedastic noise (kernel will need to compute psi2 as a (NxMxM) array)
+
+class sparse_GP(GP):
+    """
+    Variational sparse GP model (regression, or EP for non-gaussian likelihoods)
+
+    :param X: inputs
+    :type X: np.ndarray (N x Q)
+    :param Y: observed data (not needed when an EP likelihood is given)
+    :type Y: np.ndarray of observations (N x D)
+    :param kernel: the kernel/covariance function (see GPy.kern)
+    :type kernel: a GPy kernel
+    :param Z: inducing inputs (optional, see note)
+    :type Z: np.ndarray (M x Q) | None
+    :param X_uncertainty: The uncertainty in the measurements of X (Gaussian variance)
+    :type X_uncertainty: np.ndarray (N x Q) | None
+    :param Zslices: slices for the inducing inputs (see slicing TODO: link)
+    :param M: Number of inducing points (optional, default 10. Ignored if Z is not None)
+    :type M: int
+    :param beta: noise precision. TODO: ignore beta if doing EP
+    :type beta: float
+    :param normalize_(X|Y): whether to normalize the data before computing (predictions will be in original scales)
+    :type normalize_(X|Y): bool
+    """
+
+    def __init__(self,X,Y=None,kernel=None, X_uncertainty=None, beta=100., Z=None,Zslices=None,M=10,normalize_X=False,normalize_Y=False,likelihood=None,method_ep='DTC',epsilon_ep=1e-3,epsilon_em=.1,power_ep=[1.,1.]):
+
+        if Z is None:
+            self.Z = np.random.permutation(X.copy())[:M]
+            self.M = M
+        else:
+            assert Z.shape[1]==X.shape[1]
+            self.Z = Z
+            self.M = Z.shape[0]
+        if X_uncertainty is None:
+            self.has_uncertain_inputs=False
+        else:
+            assert X_uncertainty.shape==X.shape
+            self.has_uncertain_inputs=True
+            self.X_uncertainty = X_uncertainty
+
+        self.ep_proxy = method_ep # EP flavour used by approximate_likelihood: 'DTC', 'FITC' or full EP
+        self.beta = beta #FIXME
+        GP.__init__(self, X, Y, kernel=kernel, normalize_X=normalize_X, normalize_Y=normalize_Y,likelihood=likelihood,epsilon_ep=epsilon_ep,epsion_em=epsilon_em,power_ep=power_ep)
+        self.beta = beta if isinstance(likelihood,gaussian) else self.tau_tilde #TODO this should be defined in GP.__init__
+
+
+        #normalise X uncertainty also
+        if self.has_uncertain_inputs:
+            self.X_uncertainty /= np.square(self._Xstd)
+
+    def _set_params(self, p):
+        if not self.EP:
+            self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
+            self.beta = p[self.M*self.Q]
+            self.kern._set_params(p[self.Z.size + 1:])
+            self.beta2 = self.beta**2
+            self._compute_kernel_matrices()
+            self._computations()
+        else:
+            self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
+            self.kern._set_params(p[self.Z.size:])
+            #self._compute_kernel_matrices() is replaced by _ep_covariance
+            self._ep_covariance()
+            self._ep_computations()
+
+    def approximate_likelihood(self):
+        assert not isinstance(self.likelihood, gaussian), "EP is only available for non-gaussian likelihoods"
+        if self.ep_proxy == 'DTC':
+            self.ep_approx = DTC(self.Kmm,self.likelihood,self.psi1,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
+        elif self.ep_proxy == 'FITC':
+            self.Knn_diag = self.kern.psi0(self.Z,self.X, self.X_uncertainty) #TODO psi0 already calculates this
+            self.ep_approx = FITC(self.Kmm,self.likelihood,self.psi1,self.Knn_diag,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
+        else:
+            self.ep_approx = Full(self.kern.K(self.X),self.likelihood,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
+        self.beta, self.v_tilde, self.Z_hat, self.tau_, self.v_ = self.ep_approx.fit_EP()
+        # Y: EP likelihood is defined as a regression model for mu_tilde
+        self.Y = self.v_tilde/self.beta
+        self._Ymean = np.zeros((1,self.Y.shape[1]))
+        self._Ystd = np.ones((1,self.Y.shape[1]))
+        self.trbetaYYT = np.sum(self.beta*np.square(self.Y))
+        if self.D > self.N:
+            # then it's more efficient to store YYT
+            self.YYT = np.dot(self.Y, self.Y.T)
+        else:
+            self.YYT = None
+        self.mu_ = self.v_/self.tau_
+        self._ep_covariance()
+        self._ep_computations()
+
+    def _ep_covariance(self):
+        self.Kmm = self.kern.K(self.Z)
+        if self.has_uncertain_inputs:
+            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty).sum()
+            self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
+            self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty) #FIXME include beta
+        else:
+            #self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices).sum()
+            self.Knn_diag = self.kern.Kdiag(self.X,slices=self.Xslices)
+            self.psi0 = (self.beta*self.Knn_diag).sum() #TODO check dimensions
+            self.psi1 = self.kern.K(self.Z,self.X)
+            #self.psi2 = np.dot(self.psi1,self.psi1.T)
+            self.psi2 = np.dot(self.psi1,self.beta*self.psi1.T)
+
+    def _compute_kernel_matrices(self):
+        # kernel computations, using BGPLVM notation
+        #TODO: slices for psi statistics (easy enough)
+
+        self.Kmm = self.kern.K(self.Z)
+        if self.has_uncertain_inputs:
+            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty).sum()
+            self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
+            self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty)
+        else:
+            self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices).sum()
+            self.psi1 = self.kern.K(self.Z,self.X)
+            self.psi2 = np.dot(self.psi1,self.psi1.T)
+
+    def _ep_computations(self):
+        # TODO find routine to multiply triangular matrices
+        self.V = self.beta*self.Y
+        self.psi1V = np.dot(self.psi1, self.V)
+        self.psi1VVpsi1 = np.dot(self.psi1V, self.psi1V.T)
+        self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
+        #self.A = mdot(self.Lmi, self.beta*self.psi2, self.Lmi.T)
+        self.A = mdot(self.Lmi, self.psi2, self.Lmi.T)
+        self.B = np.eye(self.M) + self.A
+        self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B)
+        self.LLambdai = np.dot(self.LBi, self.Lmi)
+        #self.trace_K = self.psi0 - np.sum(np.dot(self.Lmi,self.psi1)**2,-1) #TODO check
+        self.trace_K = self.psi0 - np.trace(self.A)
+        self.LBL_inv = mdot(self.Lmi.T, self.Bi, self.Lmi)
+        self.C = mdot(self.LLambdai, self.psi1V)
+        self.G = mdot(self.LBL_inv, self.psi1VVpsi1, self.LBL_inv.T)
+
+        # Compute dL_dpsi
+        #self.dL_dpsi0 = - 0.5 * self.D * self.beta * np.ones(self.N)
+        self.dL_dpsi0 = - 0.5 * self.D * self.beta.flatten() * np.ones(self.N) #TODO check
+        self.dL_dpsi1 = mdot(self.LLambdai.T,self.C,self.V.T)
+        #TODO check: should beta appear here, given that psi2 already includes it in the EP case?
+        self.dL_dpsi2 = - 0.5 * self.beta * (self.D*(self.LBL_inv - self.Kmmi) + self.G)
+
+        # Compute dL_dKmm
+        self.dL_dKmm = -0.5 * self.D * mdot(self.Lmi.T, self.A, self.Lmi) # dB
+        self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - 2.*self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi) + self.Kmmi) # dC
+        self.dL_dKmm += np.dot(np.dot(self.G,self.beta*self.psi2) - np.dot(self.LBL_inv, self.psi1VVpsi1), self.Kmmi) + 0.5*self.G # dE
+
+    def _get_params(self):
+        if not self.EP:
+            return np.hstack([self.Z.flatten(),self.beta,self.kern._get_params_transformed()])
+        else:
+            return np.hstack([self.Z.flatten(),self.kern._get_params_transformed()])
+
+    def _get_param_names(self):
+        if not self.EP:
+            return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern._get_param_names_transformed()
+        else:
+            return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + self.kern._get_param_names_transformed()
+
+    def log_likelihood(self):
+        """
+        Compute the (lower bound on the) log marginal likelihood
+        """
+        beta_logdet = self.N*self.D*np.log(self.beta) if not self.EP else self.D*np.sum(np.log(self.beta))
+        A = -0.5*self.N*self.D*np.log(2.*np.pi) + 0.5*beta_logdet
+        B = -0.5*self.beta*self.D*self.trace_K if not self.EP else -0.5*self.D*self.trace_K
+        C = -0.5*self.D * self.B_logdet
+        D = -0.5*self.beta*self.trYYT if not self.EP else -0.5*self.trbetaYYT
+        E = +0.5*np.sum(self.psi1VVpsi1 * self.LBL_inv)
+        return A+B+C+D+E
+
+    def dL_dbeta(self):
+        """
+        Compute the gradient of the log likelihood wrt beta.
+        """
+        #TODO: support heteroscedastic noise
+        dA_dbeta = 0.5 * self.N*self.D/self.beta
+        dB_dbeta = - 0.5 * self.D * self.trace_K
+        dC_dbeta = - 0.5 * self.D * np.sum(self.Bi*self.A)/self.beta
+        dD_dbeta = - 0.5 * self.trYYT
+        tmp = mdot(self.LBi.T, self.LLambdai, self.psi1V)
+        dE_dbeta = (np.sum(np.square(self.C)) - 0.5 * np.sum(self.A * np.dot(tmp, tmp.T)))/self.beta
+
+        return np.squeeze(dA_dbeta + dB_dbeta + dC_dbeta + dD_dbeta + dE_dbeta)
+
+    def dL_dtheta(self):
+        """
+        Compute and return the derivative of the log marginal likelihood wrt the parameters of the kernel
+        """
+        dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm,self.Z)
+        if self.has_uncertain_inputs:
+            dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z,self.X,self.X_uncertainty)
+            dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1.T,self.Z,self.X, self.X_uncertainty)
+            dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2,self.Z,self.X, self.X_uncertainty) # for multiple_beta, dL_dpsi2 will be a different shape
+        else:
+            #re-cast computations in psi2 back to psi1:
+            dL_dpsi1 = self.dL_dpsi1 + 2.*np.dot(self.dL_dpsi2,self.psi1)
+            dL_dtheta += self.kern.dK_dtheta(dL_dpsi1,self.Z,self.X)
+            dL_dtheta += self.kern.dKdiag_dtheta(self.dL_dpsi0, self.X)
+
+        return dL_dtheta
+
+    def dL_dZ(self):
+        """
+        The derivative of the bound wrt the inducing inputs Z
+        """
+        dL_dZ = 2.*self.kern.dK_dX(self.dL_dKmm,self.Z) # factor of two because of vertical and horizontal 'stripes' in dKmm_dZ
+        if self.has_uncertain_inputs:
+            dL_dZ += self.kern.dpsi1_dZ(self.dL_dpsi1.T,self.Z,self.X, self.X_uncertainty)
+            dL_dZ += self.kern.dpsi2_dZ(self.dL_dpsi2,self.Z,self.X, self.X_uncertainty)
+        else:
+            #re-cast computations in psi2 back to psi1:
+            dL_dpsi1 = self.dL_dpsi1 + 2.*np.dot(self.dL_dpsi2,self.psi1)
+            dL_dZ += self.kern.dK_dX(dL_dpsi1,self.Z,self.X)
+        return dL_dZ
+
+    def _log_likelihood_gradients(self):
+        return np.hstack([self.dL_dZ().flatten(), self.dL_dtheta()]) if self.EP else np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()]) # no beta gradient under EP
+
+    def _raw_predict(self, Xnew, slices, full_cov=False):
+        """Internal helper function for making predictions, does not account for normalisation"""
+        Kx = self.kern.K(self.Z, Xnew)
+        mu = mdot(Kx.T, self.LBL_inv, self.psi1V)
+        if full_cov:
+            noise_term = np.eye(Xnew.shape[0])/self.beta if not self.EP else 0
+            Kxx = self.kern.K(Xnew)
+            var = Kxx - mdot(Kx.T, (self.Kmmi - self.LBL_inv), Kx) + noise_term
+        else:
+            noise_term = 1./self.beta if not self.EP else 0
+            Kxx = self.kern.Kdiag(Xnew)
+            var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.LBL_inv, Kx),0) + noise_term
+        return mu,var
+
+    def plot(self, *args, **kwargs):
+        """
+        Plot the fitted model: just call the GP plot function and then add the inducing inputs
+        """
+        GP.plot(self,*args,**kwargs)
+        if self.Q==1:
+            pb.plot(self.Z,self.Z*0+pb.ylim()[0],'k|',mew=1.5,markersize=12)
+            if self.has_uncertain_inputs:
+                pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_uncertainty.flatten()))
+        if self.Q==2:
+            pb.plot(self.Z[:,0],self.Z[:,1],'wo')
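
Reviewer note: below is a minimal usage sketch of the sparse EP model this patch adds, for anyone wanting to try it. It is not part of the patch; it assumes the same 2013-era GPy API used in the examples above (GPy.kern.rbf, GPy.inference.likelihoods.probit, Python 2 print statements) and exercises the 'DTC' EP flavour that sparse_GP.approximate_likelihood selects by default. The fixed inducing inputs Z are an illustrative choice, not something the patch prescribes.

    # Sketch only: mirrors GPy/examples/sparse_ep_fix.py, with explicit inducing inputs.
    import numpy as np
    import GPy

    np.random.seed(0)
    N, M = 200, 10
    X = np.random.uniform(-3., 3., (N, 1))
    F = np.sin(X) + np.random.randn(N, 1)*0.05
    Y = np.where(F > 0, 1., -1.)               # binary labels for the probit likelihood

    likelihood = GPy.inference.likelihoods.probit(Y)
    kernel = GPy.kern.rbf(1) + GPy.kern.white(1)
    Z = np.linspace(-3., 3., M)[:, None]       # fixed inducing inputs instead of a random subset of X

    # method_ep selects the EP flavour: 'DTC' (default), 'FITC', or full EP otherwise
    m = GPy.models.sparse_GP(X, kernel=kernel, Z=Z, likelihood=likelihood, method_ep='DTC')
    m.constrain_positive('(variance|lengthscale)')
    if not isinstance(m.likelihood, GPy.inference.likelihoods.gaussian):
        m.approximate_likelihood()             # fits the EP site parameters (tau_tilde, v_tilde)
    print m.checkgrad()
    m.optimize('tnc', messages=1)
    m.plot()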