diff --git a/GPy/examples/sparse_ep_fix.py b/GPy/examples/sparse_ep_fix.py
index 7e3f1fc3..f2c25898 100644
--- a/GPy/examples/sparse_ep_fix.py
+++ b/GPy/examples/sparse_ep_fix.py
@@ -31,18 +31,17 @@
 noise = GPy.kern.white(1)
 kernel = rbf + noise
 
 # create simple GP model
-#m1 = GPy.models.sparse_GP(X, Y, kernel, M=M)
-m1 = GPy.models.sparse_GP(X,Y=None, kernel=kernel, M=M,likelihood= likelihood)
+m = GPy.models.sparse_GP(X,Y=None, kernel=kernel, M=M,likelihood= likelihood)
+#m = GPy.models.sparse_GP(X, Y, kernel, M=M)
 
-print m1.checkgrad()
 # contrain all parameters to be positive
-m1.constrain_positive('(variance|lengthscale|precision)')
-#m1.constrain_positive('(variance|lengthscale)')
-#m1.constrain_fixed('prec',10.)
-
+m.ensure_default_constraints()
+if not isinstance(m.likelihood,GPy.inference.likelihoods.gaussian):
+    m.approximate_likelihood()
+print m.checkgrad() #check gradient FIXME unit test please
 
 # optimize and plot
-m1.optimize('tnc', messages = 1)
-m1.plot()
-# print(m1)
+#m.optimize('tnc', messages = 1)
+m.plot(samples=3,full_cov=False)
+# print(m)
diff --git a/GPy/inference/EP.py b/GPy/inference/EP.py
index 5c473a8f..c3aad7c1 100644
--- a/GPy/inference/EP.py
+++ b/GPy/inference/EP.py
@@ -136,7 +136,7 @@ class DTC(EP):
         q(f|X) = int_{df}{N(f|KfuKuu_invu,diag(Kff-Qff)*N(u|0,Kuu)} = N(f|0,Sigma0)
         Sigma0 = Qnn = Knm*Kmmi*Kmn
         """
-        self.Kmmi, self.Kmm_hld = pdinv(self.Kmm)
+        self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
         self.KmnKnm = np.dot(self.Kmn, self.Kmn.T)
         self.KmmiKmn = np.dot(self.Kmmi,self.Kmn)
         self.Qnn_diag = np.sum(self.Kmn*self.KmmiKmn,-2)
@@ -222,7 +222,7 @@ class FITC(EP):
         q(f|X) = int_{df}{N(f|KfuKuu_invu,diag(Kff-Qff)*N(u|0,Kuu)} = N(f|0,Sigma0)
         Sigma0 = diag(Knn-Qnn) + Qnn, Qnn = Knm*Kmmi*Kmn
         """
-        self.Kmmi, self.Kmm_hld = pdinv(self.Kmm)
+        self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
         self.P0 = self.Kmn.T
         self.KmnKnm = np.dot(self.P0.T, self.P0)
         self.KmmiKmn = np.dot(self.Kmmi,self.P0.T)
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
index 4d80ab87..482143d6 100644
--- a/GPy/models/GP.py
+++ b/GPy/models/GP.py
@@ -196,7 +196,6 @@ class GP(model):
           This is to allow for different normalisations of the output dimensions.
 
         """
-        #normalise X values
         Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
         mu, var, phi = self._raw_predict(Xnew, slices, full_cov)
 
@@ -224,13 +223,18 @@ class GP(model):
         if full_cov:
             Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
             var = Kxx - np.dot(KiKx.T,Kx)
+            if self.EP:
+                raise NotImplementedError, "full_cov = True not implemented for EP"
+                #var = np.diag(var)[:,None]
+                #phi = self.likelihood.predictive_mean(mu,var)
         else:
             Kxx = self.kern.Kdiag(_Xnew, slices=slices)
             var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
-        phi = None if not self.EP else self.likelihood.predictive_mean(mu,var)
+        if self.EP:
+            phi = self.likelihood.predictive_mean(mu,var)
         return mu, var, phi
 
-    def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None):
+    def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None,full_cov=False):
         """
         :param samples: the number of a posteriori samples to plot
         :param which_data: which if the training data to plot (default all)
@@ -268,13 +272,13 @@ class GP(model):
 
         if self.X.shape[1]==1:
             Xnew = np.linspace(xmin,xmax,resolution or 200)[:,None]
-            m,v,phi = self.predict(Xnew,slices=which_functions)
+            m,v,phi = self.predict(Xnew,slices=which_functions,full_cov=full_cov)
             if self.EP:
                 pb.subplot(211)
             gpplot(Xnew,m,v)
 
             if samples: #NOTE why don't we put samples as a parameter of gpplot
-                s = np.random.multivariate_normal(m.flatten(),np.diag(v),samples)
+                s = np.random.multivariate_normal(m.flatten(),np.diag(v.flatten()),samples)
                 pb.plot(Xnew.flatten(),s.T, alpha = 0.4, c='#3465a4', linewidth = 0.8)
             pb.plot(Xorig,Yorig,'kx',mew=1.5)
             pb.xlim(xmin,xmax)
@@ -288,7 +292,7 @@ class GP(model):
             resolution = 50 or resolution
             xx,yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
             Xtest = np.vstack((xx.flatten(),yy.flatten())).T
-            zz,vv,phi = self.predict(Xtest,slices=which_functions)
+            zz,vv,phi = self.predict(Xtest,slices=which_functions,full_cov=full_cov)
             zz = zz.reshape(resolution,resolution)
             pb.contour(xx,yy,zz,vmin=zz.min(),vmax=zz.max(),cmap=pb.cm.jet)
             pb.scatter(Xorig[:,0],Xorig[:,1],40,Yorig,linewidth=0,cmap=pb.cm.jet,vmin=zz.min(),vmax=zz.max())
diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py
index ea1ba100..8b1b6fb9 100644
--- a/GPy/models/sparse_GP.py
+++ b/GPy/models/sparse_GP.py
@@ -7,7 +7,7 @@
 from ..util.linalg import mdot, jitchol, chol_inv, pdinv
 from ..util.plot import gpplot
 from .. import kern
 from GP import GP
-from ..inference.EP import Full
+from ..inference.EP import Full,DTC,FITC
 from ..inference.likelihoods import likelihood,probit,poisson,gaussian
 #Still TODO:
@@ -36,6 +36,8 @@ class sparse_GP(GP):
     :param normalize_(X|Y) : whether to normalize the data before computing (predictions will be in original scales)
     :type normalize_(X|Y): bool
     :parm likelihood: a GPy likelihood, defaults to gaussian
+    :param method_ep: sparse approximation used by Expectation Propagation algorithm, defaults to DTC
+    :type M: string (Full|DTC|FITC)
     :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1
     :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
     :type powerep: list
@@ -58,17 +60,22 @@ class sparse_GP(GP):
         self.X_uncertainty = X_uncertainty
 
         GP.__init__(self, X=X, Y=Y, kernel=kernel, normalize_X=normalize_X, normalize_Y=normalize_Y,likelihood=likelihood,epsilon_ep=epsilon_ep,power_ep=power_ep)
-        self.trYYT = np.sum(np.square(self.Y)) if not self.EP else None
 
         #normalise X uncertainty also
         if self.has_uncertain_inputs:
             self.X_uncertainty /= np.square(self._Xstd)
 
+        if not self.EP:
+            self.trYYT = np.sum(np.square(self.Y))
+        else:
+            self.method_ep = method_ep
+
+
     def _set_params(self, p):
         self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
         if not self.EP:
-            #self.beta = p[self.M*self.Q]
-            self.beta = np.repeat(p[self.M*self.Q],self.N)[:,None]
+            self.beta = p[self.M*self.Q]
+            #self.beta = np.repeat(p[self.M*self.Q],self.N)[:,None]
             self.kern._set_params(p[self.Z.size + 1:])
             self.beta2 = self.beta**2
         else:
@@ -76,7 +83,7 @@ class sparse_GP(GP):
             if self.Y is None:
                 self.Y = np.ones([self.N,1])
         self._compute_kernel_matrices()
-        self._computations()
+        self._computations() #NOTE At this point computations of dL are not needed
 
     def _get_params(self):
         if not self.EP:
@@ -123,24 +130,29 @@ class sparse_GP(GP):
         self.G = mdot(self.LBL_inv, self.psi1VVpsi1, self.LBL_inv.T)
 
         # Compute dL_dpsi
-        self.dL_dpsi0 = - 0.5 * self.D * self.beta * np.ones([self.N,1])
+        self.dL_dpsi0 = - 0.5 * self.D * self.beta.flatten() * np.ones(self.N)
         self.dL_dpsi1 = mdot(self.LLambdai.T,self.C,self.V.T)
-        self.dL_dpsi2 = - 0.5 * self.beta * (self.D*(self.LBL_inv - self.Kmmi) + self.G)
+        #self.dL_dpsi2 = - 0.5 * self.beta * (self.D*(self.LBL_inv - self.Kmmi) + self.G)
+        self.dL_dpsi2 = - 0.5 * (self.D*(self.LBL_inv - self.Kmmi) + self.G)
 
         # Compute dL_dKmm
         self.dL_dKmm = -0.5 * self.D * mdot(self.Lmi.T, self.A, self.Lmi) # dB
-        self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - 2.*self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi) + self.Kmmi) # dC
-        self.dL_dKmm += np.dot(np.dot(self.G,self.beta*self.psi2) - np.dot(self.LBL_inv, self.psi1VVpsi1), self.Kmmi) + 0.5*self.G # dE
+        #self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - 2.*self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi) + self.Kmmi) # dC
+        self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - 2.*mdot(self.LBL_inv, self.psi2_beta_scaled, self.Kmmi) + self.Kmmi) # dC
+        #self.dL_dKmm += np.dot(np.dot(self.G,self.beta*self.psi2) - np.dot(self.LBL_inv, self.psi1VVpsi1), self.Kmmi) + 0.5*self.G # dE
+        self.dL_dKmm += np.dot(np.dot(self.G,self.psi2_beta_scaled) - np.dot(self.LBL_inv, self.psi1VVpsi1), self.Kmmi) + 0.5*self.G # dE
 
     def approximate_likelihood(self):
        assert not isinstance(self.likelihood, gaussian), "EP is only available for non-gaussian likelihoods"
-        if self.ep_proxy == 'DTC':
+        if self.method_ep == 'DTC':
            self.ep_approx = DTC(self.Kmm,self.likelihood,self.psi1,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
-        elif self.ep_proxy == 'FITC':
+        elif self.method_ep == 'FITC':
            self.ep_approx = FITC(self.Kmm,self.likelihood,self.psi1,self.psi0,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
        else:
            self.ep_approx = Full(self.X,self.likelihood,self.kernel,inducing=None,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
        self.beta, self.Y, self.Z_ep = self.ep_approx.fit_EP()
+       print "Aqui toy"
+       self.trbetaYYT = np.sum(np.square(self.Y)*self.beta)
        self._computations()
 
@@ -149,30 +161,11 @@ class sparse_GP(GP):
     def log_likelihood(self):
         """
         Compute the (lower bound on the) log marginal likelihood
         """
         if not self.EP:
             A = -0.5*self.N*self.D*(np.log(2.*np.pi) - np.log(self.beta))
+            D = -0.5*self.beta*self.trYYT
         else:
             A = -0.5*self.D*(self.N*np.log(2.*np.pi) - np.sum(np.log(self.beta)))
-        B = -0.5*self.D*self.trace_K
-        C = -0.5*self.D * self.B_logdet
-        D = -0.5*self.beta*self.trYYT
-        E = +0.5*np.sum(self.psi1VVpsi1 * self.LBL_inv)
-        return A+B+C+D+E
-
-
-
-
-    def log_likelihood(self):
-        """
-        Compute the (lower bound on the) log marginal likelihood
-        """
-        beta_logdet = self.N*self.D*np.log(self.beta) if not self.EP else self.D*np.sum(np.log(self.beta))
-        if self.hetero_noise:
-            A = foo
-            B = bar
             D = -0.5*self.trbetaYYT
-        else:
-            A = -0.5*self.N*self.D*(np.log(2.*np.pi)) - 0.5*beta_logdet
-            B = -0.5*self.beta*self.D*self.trace_K if not self.EP else -0.5*self.D*self.trace_K
-            D = -0.5*self.beta*self.trYYT
+        B = -0.5*self.D*self.trace_K
         C = -0.5*self.D * self.B_logdet
         E = +0.5*np.sum(self.psi1VVpsi1 * self.LBL_inv)
         return A+B+C+D+E
@@ -223,21 +216,33 @@ class sparse_GP(GP):
         return dL_dZ
 
     def _log_likelihood_gradients(self):
-        return np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()])
+        if not self.EP:
+            return np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()])
+        else:
+            return np.hstack([self.dL_dZ().flatten(), self.dL_dtheta()])
 
     def _raw_predict(self, Xnew, slices, full_cov=False):
         """Internal helper function for making predictions, does not account for normalisation"""
         Kx = self.kern.K(self.Z, Xnew)
         mu = mdot(Kx.T, self.LBL_inv, self.psi1V)
+        phi = None
         if full_cov:
-            noise_term = np.eye(Xnew.shape[0])/self.beta if not self.EP else 0
             Kxx = self.kern.K(Xnew)
-            var = Kxx - mdot(Kx.T, (self.Kmmi - self.LBL_inv), Kx) + noise_term
+            var = Kxx - mdot(Kx.T, (self.Kmmi - self.LBL_inv), Kx)
+            if not self.EP:
+                var += np.eye(Xnew.shape[0])/self.beta # TODO: This beta doesn't belong here in the EP case.
+            else:
+                raise NotImplementedError, "full_cov = True not implemented for EP"
+                #var = np.diag(var)[:,None]
+                #phi = self.likelihood.predictive_mean(mu,var)
         else:
-            noise_term = 1./self.beta if not self.EP else 0
            Kxx = self.kern.Kdiag(Xnew)
-            var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.LBL_inv, Kx),0) + noise_term
-        return mu,var,None#TODO add phi for EP
+            var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.LBL_inv, Kx),0)
+            if not self.EP:
+                var += 1./self.beta # TODO: This beta doesn't belong here in the EP case.
+            else:
+                phi = self.likelihood.predictive_mean(mu,var)
+        return mu,var,phi
 
     def plot(self, *args, **kwargs):
         """
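Reviewer note on the revised example: for readers who want to exercise the EP path without reading sparse_ep_fix.py in full, the flow the patch settles on is sketched below. This is a minimal sketch, not a verbatim copy of the example: the toy data, the value of M and the probit likelihood constructor are assumptions made for illustration and may not match the constructors in this revision.

    import numpy as np
    import GPy

    # assumed toy 1-D binary data; the real example builds its own X, Y and likelihood
    X = np.random.uniform(0., 10., (60, 1))
    Y = np.where(np.sin(X) > 0., 1., -1.)
    likelihood = GPy.inference.likelihoods.probit(Y)   # assumed constructor signature

    kernel = GPy.kern.rbf(1) + GPy.kern.white(1)
    m = GPy.models.sparse_GP(X, Y=None, kernel=kernel, M=10, likelihood=likelihood)

    m.ensure_default_constraints()
    if not isinstance(m.likelihood, GPy.inference.likelihoods.gaussian):
        m.approximate_likelihood()   # runs EP: fills in beta (site precisions) and the pseudo-targets
    print m.checkgrad()
    m.plot(samples=3, full_cov=False)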
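Note on the pdinv change: both EP classes now unpack four values from pdinv instead of two. The contract being relied on is (inverse, lower Cholesky factor, inverse of that factor, log-determinant). The sketch below illustrates that assumed contract with NumPy/SciPy; it is not the GPy.util.linalg implementation. The old two-output form bound its second value to Kmm_hld, which reads as a half log-determinant, so any remaining caller of the old form should double-check for a factor of two.

    import numpy as np
    from scipy import linalg

    def pdinv_sketch(A):
        # assumed contract of pdinv(A): (A_inv, L, L_inv, log|A|) for symmetric positive-definite A
        L = linalg.cholesky(A, lower=True)                               # A = L L^T
        Li = linalg.solve_triangular(L, np.eye(A.shape[0]), lower=True)  # L^{-1}
        Ai = np.dot(Li.T, Li)                                            # A^{-1} = L^{-T} L^{-1}
        logdet = 2. * np.sum(np.log(np.diag(L)))                         # log|A| from the Cholesky diagonal
        return Ai, L, Li, logdet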
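Note on the merged log_likelihood: reading the terms directly off the code (the symbols below are the attribute names, so this is a gloss on the implementation rather than a derivation), the bound it returns is

$$\log p(Y)\;\ge\;-\tfrac{D}{2}\Big(N\log 2\pi-\sum_n\log\beta_n\Big)\;-\;\tfrac{D}{2}\,\texttt{trace\_K}\;-\;\tfrac{D}{2}\,\log\lvert B\rvert\;-\;\tfrac{1}{2}\,\mathrm{tr}\!\big(\mathrm{diag}(\beta)\,YY^{\top}\big)\;+\;\tfrac{1}{2}\sum_{ij}\big(\psi_1^{\top}VV^{\top}\psi_1\big)_{ij}\big(LBL^{-1}\big)_{ij},$$

where the five summands are, in order, the A, B, C, D and E terms in the code, D is the number of output dimensions, log|B| is B_logdet, and the beta_n are the per-point precisions (the EP site precisions). In the Gaussian branch all beta_n equal the scalar beta, so the first and fourth terms reduce to -ND/2 (log 2pi - log beta) and -(beta/2) tr(YY^T), which is exactly the if/else split the patch keeps.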
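Note on the predictive convention: GP._raw_predict and sparse_GP._raw_predict now agree that the posterior variance is formed without observation noise, that 1/beta is folded back in only for the Gaussian likelihood, and that under EP the third return value phi is the likelihood's predictive mean. A schematic restatement of the diagonal (full_cov=False) branch, written against precomputed kernel pieces rather than the model object:

    import numpy as np

    def raw_predict_diag(mu, Kxx_diag, Kx, Kmmi, LBL_inv, beta, EP, likelihood):
        # sparse posterior variance at the test inputs, with no noise term yet
        var = Kxx_diag - np.sum(Kx * np.dot(Kmmi - LBL_inv, Kx), 0)
        phi = None
        if not EP:
            var = var + 1. / beta                        # Gaussian case: add the noise variance back
        else:
            phi = likelihood.predictive_mean(mu, var)    # EP case: push the latent moments through the likelihood
        return mu, var, phi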