From 6a2e0a1fe554dfe00036b6fdef82c9d437bff3f0 Mon Sep 17 00:00:00 2001 From: Ricardo Andrade Date: Fri, 25 Jan 2013 18:14:28 +0000 Subject: [PATCH] fixing EP and merging it with GP_regression --- GPy/examples/ep_fix.py | 11 +- GPy/inference/EP.py | 12 +- GPy/inference/likelihoods.py | 31 ++-- GPy/models/GP.py | 312 +++++++++++++++++++++++++++++++++++ GPy/models/GP_EP.py | 2 +- GPy/models/GP_EP2.py | 127 +++++++------- GPy/models/__init__.py | 1 + 7 files changed, 403 insertions(+), 93 deletions(-) create mode 100644 GPy/models/GP.py diff --git a/GPy/examples/ep_fix.py b/GPy/examples/ep_fix.py index e4999f30..2da94335 100644 --- a/GPy/examples/ep_fix.py +++ b/GPy/examples/ep_fix.py @@ -25,14 +25,15 @@ seed=default_seed data = GPy.util.datasets.toy_linear_1d_classification(seed=seed) likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1]) -m = GPy.models.GP_EP2(data['X'],likelihood) +m = GPy.models.GP(data['X'],likelihood=likelihood) -#m.constrain_positive('var') -#m.constrain_positive('len') -#m.tie_param('lengthscale') +m.constrain_positive('var') +m.constrain_positive('len') +m.tie_param('lengthscale') m.approximate_likelihood() +print m.checkgrad() # Optimize and plot -#m.optimize() +m.optimize() #m.em(plot_all=False) # EM algorithm m.plot() diff --git a/GPy/inference/EP.py b/GPy/inference/EP.py index fa691961..f7c163b1 100644 --- a/GPy/inference/EP.py +++ b/GPy/inference/EP.py @@ -60,7 +60,7 @@ class Full(EP): def fit_EP(self): """ The expectation-propagation algorithm. - For nomenclature see Rasmussen & Williams 2006 (pag. 52-60) + For nomenclature see Rasmussen & Williams 2006. """ #Prior distribution parameters: p(f|X) = N(f|0,K) #self.K = self.kernel.K(self.X,self.X) @@ -84,8 +84,6 @@ class Full(EP): phi = np.empty(self.N,dtype=float) mu_hat = np.empty(self.N,dtype=float) sigma2_hat = np.empty(self.N,dtype=float) - self.mu_hat = mu_hat #TODO erase me - self.sigma2_hat = sigma2_hat #TODO erase me #Approximation epsilon_np1 = self.epsilon + 1. 
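+        #sweep over the sites until the site parameters (tau_tilde, v_tilde) stop changing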
@@ -95,21 +93,16 @@ class Full(EP):
         self.np2 = [self.v_tilde.copy()]
         while epsilon_np1 > self.epsilon or epsilon_np2 > self.epsilon:
             update_order = np.arange(self.N)
-            #random.shuffle(update_order) #TODO uncomment
+            random.shuffle(update_order)
             for i in update_order:
                 #Cavity distribution parameters
                 self.tau_[i] = 1./self.Sigma[i,i] - self.eta*self.tau_tilde[i]
                 self.v_[i] = self.mu[i]/self.Sigma[i,i] - self.eta*self.v_tilde[i]
                 #Marginal moments
                 self.Z_hat[i], mu_hat[i], sigma2_hat[i] = self.likelihood.moments_match(i,self.tau_[i],self.v_[i])
-                self.mu_hat[i] = mu_hat[i] #TODO erase me
-                self.sigma2_hat[i] = sigma2_hat[i] #TODO erase me
-                #if i == 3:
-                #    a = b
                 #Site parameters update
                 Delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./self.Sigma[i,i])
                 Delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - self.mu[i]/self.Sigma[i,i])
-                print Delta_tau
                 self.tau_tilde[i] = self.tau_tilde[i] + Delta_tau
                 self.v_tilde[i] = self.v_tilde[i] + Delta_v
                 #Posterior distribution parameters update
@@ -128,6 +121,7 @@ class Full(EP):
         epsilon_np2 = sum((self.v_tilde-self.np2[-1])**2)/self.N
         self.np1.append(self.tau_tilde.copy())
         self.np2.append(self.v_tilde.copy())
+        return self.tau_tilde[:,None], self.v_tilde[:,None], self.Z_hat[:,None], self.tau_[:,None], self.v_[:,None]

 class DTC(EP):
     def fit_EP(self):
diff --git a/GPy/inference/likelihoods.py b/GPy/inference/likelihoods.py
index ff4770f6..29e194e0 100644
--- a/GPy/inference/likelihoods.py
+++ b/GPy/inference/likelihoods.py
@@ -19,7 +19,7 @@ class likelihood:
         self.Y = Y
         self.N = self.Y.shape[0]

-    def plot1Da(self,X_new,Mean_new,Var_new,X_u,Mean_u,Var_u):
+    def plot1Da(self,X,mean,var,Z=None,mean_Z=None,var_Z=None):
         """
         Plot the predictive distribution of the GP model for
         1-dimensional inputs
@@ -30,10 +30,18 @@
-        :param Mean_u: mean values at X_u
-        :param Var_new: variance values at X_u
+        :param mean_Z: mean values at Z
+        :param var_Z: variance values at Z
         """
-        assert X_new.shape[1] == 1, 'Number of dimensions must be 1'
-        gpplot(X_new,Mean_new,Var_new)
-        pb.errorbar(X_u.flatten(),Mean_u.flatten(),2*np.sqrt(Var_u.flatten()),fmt='r+')
-        pb.plot(X_u,Mean_u,'ro')
+        assert X.shape[1] == 1, 'Number of dimensions must be 1'
+        gpplot(X,mean,var.flatten())
+        pb.errorbar(Z.flatten(),mean_Z.flatten(),2*np.sqrt(var_Z.flatten()),fmt='r+')
+        pb.plot(Z,mean_Z,'ro')
+
+    def plot1Db(self,X_obs,X,phi,Z=None):
+        assert X_obs.shape[1] == 1, 'Number of dimensions must be 1'
+        gpplot(X,phi,np.zeros(X.shape[0]))
+        pb.plot(X_obs,(self.Y+1)/2,'kx',mew=1.5)
+        pb.ylim(-0.2,1.2)
+        if Z is not None:
+            pb.plot(Z,Z*0+.5,'r|',mew=1.5,markersize=12)

     def plot2D(self,X,X_new,F_new,U=None):
         """
@@ -88,16 +96,11 @@
             sigma2_hat = 1./tau_i - (phi/((tau_i**2+tau_i)*Z_hat))*(z+phi/Z_hat)
         return Z_hat, mu_hat, sigma2_hat

-    def plot1Db(self,X,X_new,F_new,U=None):
-        assert X.shape[1] == 1, 'Number of dimensions must be 1'
-        gpplot(X_new,F_new,np.zeros(X_new.shape[0]))
-        pb.plot(X,(self.Y+1)/2,'kx',mew=1.5)
-        pb.ylim(-0.2,1.2)
-        if U is not None:
-            pb.plot(U,U*0+.5,'r|',mew=1.5,markersize=12)
-
-    def predictive_mean(self,mu,variance):
-        return stats.norm.cdf(mu/np.sqrt(1+variance))
+    def predictive_mean(self,mu,var):
+        mu = mu.flatten()
+        var = var.flatten()
+        return stats.norm.cdf(mu/np.sqrt(1+var))

     def _log_likelihood_gradients():
         raise NotImplementedError
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
new file mode 100644
index 00000000..4a8d23e9
--- /dev/null
+++ b/GPy/models/GP.py
@@ -0,0 +1,312 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+
+import numpy as np
+import pylab as pb
+from .. import kern
+from ..core import model
+from ..util.linalg import pdinv,mdot
+from ..util.plot import gpplot, Tango
+from ..inference.EP import Full
+from ..inference.likelihoods import likelihood,probit,poisson,gaussian
+
+class GP(model):
+    """
+    Gaussian Process model for regression
+
+    :param X: input observations
+    :param Y: observed values
+    :param kernel: a GPy kernel, defaults to rbf+white
+    :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
+    :type normalize_X: False|True
+    :param normalize_Y: whether to normalize the output data before computing (predictions will be in original scales)
+    :type normalize_Y: False|True
+    :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing)
+    :rtype: model object
+
+    .. Note:: Multiple independent outputs are allowed using columns of Y
+
+    """
+
+    def __init__(self,X,Y=None,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None,likelihood=None,epsilon_ep=1e-3,epsilon_em=.1,powerep=[1.,1.]):
+        #TODO: specify beta parameter explicitly
+
+        # parse arguments
+        self.Xslices = Xslices
+        self.X = X
+        self.N, self.Q = self.X.shape
+        assert len(self.X.shape)==2
+        if kernel is None:
+            kernel = kern.rbf(X.shape[1]) + kern.white(X.shape[1])
+        else:
+            assert isinstance(kernel, kern.kern)
+        self.kern = kernel
+
+        #here's some simple normalisation
+        if normalize_X:
+            self._Xmean = X.mean(0)[None,:]
+            self._Xstd = X.std(0)[None,:]
+            self.X = (X.copy() - self._Xmean) / self._Xstd
+            if hasattr(self,'Z'):
+                self.Z = (self.Z - self._Xmean) / self._Xstd
+        else:
+            self._Xmean = np.zeros((1,self.X.shape[1]))
+            self._Xstd = np.ones((1,self.X.shape[1]))
+
+        # Y - likelihood related variables; these change depending on whether EP is used
+        if likelihood is None:
+            assert Y is not None, "Either Y or likelihood must be defined"
+            self.likelihood = gaussian(Y)
+        else:
+            self.likelihood = likelihood
+        assert len(self.likelihood.Y.shape)==2
+        assert self.X.shape[0] == self.likelihood.Y.shape[0]
+        self.N, self.D = self.likelihood.Y.shape
+
+        if isinstance(self.likelihood,gaussian):
+            self.EP = False
+            self.Y = Y
+
+            #here's some simple normalisation
+            if normalize_Y:
+                self._Ymean = Y.mean(0)[None,:]
+                self._Ystd = Y.std(0)[None,:]
+                self.Y = (Y.copy() - self._Ymean) / self._Ystd
+            else:
+                self._Ymean = np.zeros((1,self.Y.shape[1]))
+                self._Ystd = np.ones((1,self.Y.shape[1]))
+
+            if self.D > self.N:
+                # then it's more efficient to store YYT
+                self.YYT = np.dot(self.Y, self.Y.T)
+            else:
+                self.YYT = None
+
+        else:
+            # Y is defined after approximating the likelihood
+            self.EP = True
+            self.eta,self.delta = powerep
+            self.epsilon_ep = epsilon_ep
+            self.tau_tilde = np.ones([self.N,self.D])
+            self.v_tilde = np.zeros([self.N,self.D])
+            self.tau_ = np.ones([self.N,self.D])
+            self.v_ = np.zeros([self.N,self.D])
+            self.Z_hat = np.ones([self.N,self.D])
+
+        model.__init__(self)
+
+    def _set_params(self,p):
+        # TODO: remove beta when using EP
+        self.kern._set_params_transformed(p)
+        if not self.EP:
+            self.K = self.kern.K(self.X,slices1=self.Xslices)
+            self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
+        else:
+            self._ep_covariance()
+
+    def _get_params(self):
+        # TODO: remove beta when using EP
+        return self.kern._get_params_transformed()
+
+    def _get_param_names(self):
+        # TODO: remove beta when using EP
+        return self.kern._get_param_names_transformed()
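+
+    # EP summary (Rasmussen & Williams 2006, ch. 3): the non-gaussian likelihood
+    # is approximated by gaussian pseudo-observations, so the model reduces to GP
+    # regression on Y* = v_tilde/tau_tilde with covariance K* = K + diag(1./tau_tilde).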
+    def approximate_likelihood(self):
+        assert not isinstance(self.likelihood, gaussian), "EP is only available for non-gaussian likelihoods"
+        self.ep_approx = Full(self.K,self.likelihood,epsilon=self.epsilon_ep,powerep=[self.eta,self.delta])
+        self.tau_tilde, self.v_tilde, self.Z_hat, self.tau_, self.v_ = self.ep_approx.fit_EP()
+        # Y: EP likelihood is defined as a regression model for mu_tilde
+        self.Y = self.v_tilde/self.tau_tilde
+        self._Ymean = np.zeros((1,self.Y.shape[1]))
+        self._Ystd = np.ones((1,self.Y.shape[1]))
+        if self.D > self.N:
+            # then it's more efficient to store YYT
+            self.YYT = np.dot(self.Y, self.Y.T)
+        else:
+            self.YYT = None
+        self.mu_ = self.v_/self.tau_
+        self._ep_covariance()
+
+    def _ep_covariance(self):
+        # Kernel plus noise variance term
+        self.K = self.kern.K(self.X,slices1=self.Xslices) + np.diag(1./self.tau_tilde.flatten())
+        self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
+
+    def _model_fit_term(self):
+        """
+        Computes the model fit using YYT if it's available
+        """
+        if self.YYT is None:
+            return -0.5*np.sum(np.square(np.dot(self.Li,self.Y)))
+        else:
+            return -0.5*np.sum(np.multiply(self.Ki, self.YYT))
+
+    def _normalization_term(self):
+        """
+        Computes the marginal likelihood normalization constants
+        """
+        sigma_sum = 1./self.tau_ + 1./self.tau_tilde
+        mu_diff_2 = (self.mu_ - self.Y)**2
+        penalty_term = np.sum(np.log(self.Z_hat))
+        return penalty_term + 0.5*np.sum(np.log(sigma_sum)) + 0.5*np.sum(mu_diff_2/sigma_sum)
+
+    def log_likelihood(self):
+        """
+        The log marginal likelihood. For an EP model it is the log likelihood of a
+        regression model for a new variable Y* = v_tilde/tau_tilde, with covariance
+        matrix K* = K + diag(1./tau_tilde), plus a normalization term.
+        """
+        complexity_term = -0.5*self.N*self.D*np.log(2.*np.pi) - 0.5*self.D*self.K_logdet
+        normalization_term = 0. if not self.EP else self._normalization_term()
+        return complexity_term + normalization_term + self._model_fit_term()
+
+    def dL_dK(self):
+        if self.YYT is None:
+            alpha = np.dot(self.Ki,self.Y)
+            dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
+        else:
+            dL_dK = 0.5*(mdot(self.Ki, self.YYT, self.Ki) - self.D*self.Ki)
+        return dL_dK
+
+    def _log_likelihood_gradients(self):
+        return self.kern.dK_dtheta(partial=self.dL_dK(),X=self.X)
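+
+    # Prediction: besides the gaussian posterior mean and variance, EP models also
+    # return phi, the likelihood's predictive mean (for the probit this is
+    # Phi(mu/sqrt(1+var))); for gaussian likelihoods phi is None.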
+    def predict(self,Xnew, slices=None, full_cov=False):
+        """
+        Predict the function(s) at the new point(s) Xnew.
+
+        Arguments
+        ---------
+        :param Xnew: The points at which to make a prediction
+        :type Xnew: np.ndarray, Nnew x self.Q
+        :param slices: specifies which output kernel(s) the Xnew correspond to (see below)
+        :type slices: (None, list of slice objects, list of ints)
+        :param full_cov: whether to return the full covariance matrix, or just the diagonal
+        :type full_cov: bool
+        :rtype: posterior mean, a Numpy array, Nnew x self.D
+        :rtype: posterior variance, a Numpy array, Nnew x Nnew x (self.D)
+
+        .. Note:: "slices" specifies how the points X_new co-vary with the training points.
+
+            - If None, the new points co-vary through every kernel part (default)
+            - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
+            - If a list of booleans, specifying which kernel parts are active
+
+        If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D.
+        If self.D == 1, the return shape is Nnew x Nnew.
+        This is to allow for different normalisations of the output dimensions.
+        """
+
+        #normalise X values
+        Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
+        mu, var, phi = self._raw_predict(Xnew, slices, full_cov)
+
+        #un-normalise
+        mu = mu*self._Ystd + self._Ymean
+        if full_cov:
+            if self.D==1:
+                var *= np.square(self._Ystd)
+            else:
+                var = var[:,:,None] * np.square(self._Ystd)
+        else:
+            if self.D==1:
+                var *= np.square(np.squeeze(self._Ystd))
+            else:
+                var = var[:,None] * np.square(self._Ystd)
+
+        return mu,var,phi
+
+    def _raw_predict(self,_Xnew,slices, full_cov=False):
+        """Internal helper function for making predictions, does not account for normalisation"""
+        Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)
+        mu = np.dot(np.dot(Kx.T,self.Ki),self.Y)
+        KiKx = np.dot(self.Ki,Kx)
+        if full_cov:
+            Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
+            var = Kxx - np.dot(KiKx.T,Kx)
+        else:
+            Kxx = self.kern.Kdiag(_Xnew, slices=slices)
+            var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
+        phi = None if not self.EP else self.likelihood.predictive_mean(mu,var)
+        return mu, var, phi
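+
+    # Plotting: EP models are drawn in two panels, the latent GP on top and the
+    # likelihood's predictive mean with the labelled data below.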
+    def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None):
+        """
+        :param samples: the number of a posteriori samples to plot
+        :param which_data: which of the training data to plot (default all)
+        :type which_data: 'all' or a slice object to slice self.X, self.Y
+        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
+        :param which_functions: which of the kernel functions to plot (additively)
+        :type which_functions: list of bools
+        :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
+
+        Plot the posterior of the GP.
+          - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
+          - In two dimensions, a contour-plot shows the mean predicted function
+          - In higher dimensions, we've not implemented this yet !TODO!
+
+        Can plot only part of the data and part of the posterior functions using which_data and which_functions
+        """
+        if which_functions=='all':
+            which_functions = [True]*self.kern.Nparts
+        if which_data=='all':
+            which_data = slice(None)
+
+        X = self.X[which_data,:]
+        Y = self.Y[which_data,:]
+
+        Xorig = X*self._Xstd + self._Xmean
+        Yorig = Y*self._Ystd + self._Ymean if not self.EP else self.likelihood.Y
+
+        if plot_limits is None:
+            xmin,xmax = Xorig.min(0),Xorig.max(0)
+            xmin, xmax = xmin-0.2*(xmax-xmin), xmax+0.2*(xmax-xmin)
+        elif len(plot_limits)==2:
+            xmin, xmax = plot_limits
+        else:
+            raise ValueError, "Bad limits for plotting"
+
+        if self.X.shape[1]==1:
+            Xnew = np.linspace(xmin,xmax,resolution or 200)[:,None]
+            m,v,phi = self.predict(Xnew,slices=which_functions)
+            if self.EP:
+                pb.subplot(211)
+
+            gpplot(Xnew,m,v)
+            if samples:
+                s = np.random.multivariate_normal(m.flatten(),np.diag(v.flatten()),samples) #v holds the diagonal variances here
+                pb.plot(Xnew.flatten(),s.T, alpha = 0.4, c='#3465a4', linewidth = 0.8)
+
+            if not self.EP:
+                pb.plot(Xorig,Yorig,'kx',mew=1.5)
+                pb.xlim(xmin,xmax)
+            else:
+                pb.xlim(xmin,xmax)
+                pb.subplot(212)
+                self.likelihood.plot1Db(self.X,Xnew,phi)
+                pb.xlim(xmin,xmax)
+
+        elif self.X.shape[1]==2:
+            resolution = resolution or 50
+            xx,yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
+            Xtest = np.vstack((xx.flatten(),yy.flatten())).T
+            zz,vv,phi = self.predict(Xtest,slices=which_functions)
+            zz = zz.reshape(resolution,resolution)
+            pb.contour(xx,yy,zz,vmin=zz.min(),vmax=zz.max(),cmap=pb.cm.jet)
+            pb.scatter(Xorig[:,0],Xorig[:,1],40,Yorig,linewidth=0,cmap=pb.cm.jet,vmin=zz.min(),vmax=zz.max())
+            pb.xlim(xmin[0],xmax[0])
+            pb.ylim(xmin[1],xmax[1])
+
+        else:
+            raise NotImplementedError, "Cannot plot GPs with more than two input dimensions"
diff --git a/GPy/models/GP_EP.py b/GPy/models/GP_EP.py
index 302ff366..1c0b9cf6 100644
--- a/GPy/models/GP_EP.py
+++ b/GPy/models/GP_EP.py
@@ -62,7 +62,7 @@ class GP_EP(model):
         self.L = jitchol(B)
         V,info = linalg.flapack.dtrtrs(self.L,self.Sroot_tilde_K,lower=1)
         self.Sigma = self.K - np.dot(V.T,V)
-        self.mu = np.dot(self.Sigma,self.ep_approx.v_tilde)
+        self.mu = np.dot(self.Sigma,self.ep_approx.v_tilde) * self.Z_hat
diff --git a/GPy/models/GP_EP2.py b/GPy/models/GP_EP2.py
index c68e7b70..ce869951 100644
--- a/GPy/models/GP_EP2.py
+++ b/GPy/models/GP_EP2.py
@@ -36,14 +36,11 @@ class GP_EP2(model):
         self.Xslices = Xslices
         assert isinstance(kernel, kern.kern)
         self.likelihood = likelihood
-        #self.Y = self.likelihood.Y #we might not need this
         self.kern = kernel
         self.X = X
         assert len(self.X.shape)==2
-        #assert len(self.Y.shape)==2
-        #assert self.X.shape[0] == self.Y.shape[0]
-        #self.N, self.D = self.Y.shape
-        self.D = 1
+        assert self.X.shape[0] == self.likelihood.Y.shape[0]
+        self.D = self.likelihood.Y.shape[1]
         self.N, self.Q = self.X.shape

         #here's some simple normalisation
@@ -75,14 +72,17 @@
         """
         self.eta,self.delta = powerep
         self.epsilon_ep = epsilon_ep
-        self.tau_tilde = np.zeros([self.N,self.D])
+        self.tau_tilde = np.ones([self.N,self.D])
         self.v_tilde = np.zeros([self.N,self.D])
+        self.tau_ = np.ones([self.N,self.D])
+        self.v_ = np.zeros([self.N,self.D])
+        self.Z_hat = np.ones([self.N,self.D])
         model.__init__(self)

     def _set_params(self,p):
         self.kern._set_params_transformed(p)
         self.K = self.kern.K(self.X,slices1=self.Xslices)
-        self.posterior_params()
+        self._ep_params()

     def _get_params(self):
         return self.kern._get_params_transformed()

@@ -92,52 +92,63 @@
     def approximate_likelihood(self):
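+        """Run EP and store the site (tau_tilde, v_tilde), normalization (Z_hat) and cavity (tau_, v_) parameters."""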
         self.ep_approx = Full(self.K,self.likelihood,epsilon=self.epsilon_ep,powerep=[self.eta,self.delta])
-        self.ep_approx.fit_EP()
-        self.tau_tilde = self.ep_approx.tau_tilde[:,None]
-        self.v_tilde = self.ep_approx.tau_tilde[:,None]
-        self.posterior_params()
-        self.Y = self.v_tilde/self.tau_tilde
-        self._Ymean = np.zeros((1,self.Y.shape[1]))
-        self._Ystd = np.ones((1,self.Y.shape[1]))
-        #self.YYT = np.dot(self.Y, self.Y.T)
+        self.tau_tilde, self.v_tilde, self.Z_hat, self.tau_, self.v_ = self.ep_approx.fit_EP()
+        self._ep_params()

-    def posterior_params(self):
-        self.Sroot_tilde_K = np.sqrt(self.tau_tilde.flatten())[:,None]*self.K
+    def _ep_params(self):
+        # Posterior mean and variance computation
+        self.Sroot_tilde_K = np.sqrt(self.tau_tilde)*self.K
         B = np.eye(self.N) + np.sqrt(self.tau_tilde.flatten())[None,:]*self.Sroot_tilde_K
         self.Bi,self.L,self.Li,B_logdet = pdinv(B)
         V = np.dot(self.Li,self.Sroot_tilde_K)
-        #V,info = linalg.flapack.dtrtrs(self.L,self.Sroot_tilde_K,lower=1)
-        self.Sigma = self.K - np.dot(V.T,V)
-        self.mu = np.dot(self.Sigma,self.v_tilde.flatten())
+        self.Sigma = self.K - np.dot(V.T,V) #posterior variance
+        self.mu = np.dot(self.Sigma,self.v_tilde) #posterior mean
+        # Kernel plus noise variance term
+        self.Kplus = self.K + np.diag(1./self.tau_tilde.flatten())
+        self.Kplusi,self.Lplus,self.Lplusi,self.Kplus_logdet = pdinv(self.Kplus)
+        # Y: EP likelihood is defined as a regression model for mu_tilde
+        self.Y = self.v_tilde/self.tau_tilde
+        self._Ymean = np.zeros((1,self.Y.shape[1]))
+        self._Ystd = np.ones((1,self.Y.shape[1]))
+        self.YYT = None #np.dot(self.Y, self.Y.T)
+        self.mu_ = self.v_/self.tau_

+    def _model_fit_term(self):
+        """
+        Computes the model fit using YYT if it's available
+        """
+        if self.YYT is None:
+            return -0.5*np.sum(np.square(np.dot(self.Lplusi,self.Y)))
+        else:
+            return -0.5*np.sum(np.multiply(self.Kplusi, self.YYT))

-    #def _model_fit_term(self):
-    #    """
-    #    Computes the model fit using YYT if it's available
-    #    """
-    #    if self.YYT is None:
-    #        return -0.5*np.sum(np.square(np.dot(self.Li,self.Y)))
-    #    else:
-    #        return -0.5*np.sum(np.multiply(self.Ki, self.YYT))
+    def _normalization_term(self):
+        """
+        Computes the marginal likelihood normalization constants
+        """
+        sigma_sum = 1./self.tau_ + 1./self.tau_tilde
+        mu_diff_2 = (self.mu_ - self.Y)**2
+        penalty_term = np.sum(np.log(self.Z_hat))
+        return penalty_term + 0.5*np.sum(np.log(sigma_sum)) + 0.5*np.sum(mu_diff_2/sigma_sum)

     def log_likelihood(self):
-        mu_ = self.ep_approx.v_/self.ep_approx.tau_
-        L1 =.5*sum(np.log(1+self.ep_approx.tau_tilde*1./self.ep_approx.tau_))-sum(np.log(np.diag(self.L)))
-        L2A =.5*np.sum((self.Sigma-np.diag(1./(self.ep_approx.tau_+self.ep_approx.tau_tilde))) * np.dot(self.ep_approx.v_tilde[:,None],self.ep_approx.v_tilde[None,:]))
-        L2B = .5*np.dot(mu_*(self.ep_approx.tau_/(self.ep_approx.tau_tilde+self.ep_approx.tau_)),self.ep_approx.tau_tilde*mu_ - 2*self.ep_approx.v_tilde)
-        L3 = sum(np.log(self.ep_approx.Z_hat))
-        return L1 + L2A + L2B + L3
+        """
+        The log marginal likelihood for an EP model can be written as the log likelihood of
+        a regression model for a new variable Y* = v_tilde/tau_tilde, with a covariance
+        matrix K* = K + diag(1./tau_tilde), plus a normalization term.
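+        (K* and its inverse are precomputed in _ep_params as self.Kplus and self.Kplusi.)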
+        """
+        complexity_term = -0.5*self.D*self.Kplus_logdet
+        return complexity_term + self._model_fit_term() + self._normalization_term()

-    def dL_dK(self): #FIXME
+    def dL_dK(self):
         if self.YYT is None:
-            alpha = np.dot(self.Ki,self.Y)
-            dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
+            alpha = np.dot(self.Kplusi,self.Y)
+            dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Kplusi)
         else:
-            dL_dK = 0.5*(mdot(self.Ki, self.YYT, self.Ki) - self.D*self.Ki)
-
+            dL_dK = 0.5*(mdot(self.Kplusi, self.YYT, self.Kplusi) - self.D*self.Kplusi)
         return dL_dK

-    def _log_likelihood_gradients(self): #FIXME
+    def _log_likelihood_gradients(self):
         return self.kern.dK_dtheta(partial=self.dL_dK(),X=self.X)

     def predict(self,Xnew, slices=None, full_cov=False):
@@ -189,32 +200,20 @@
     def _raw_predict(self,_Xnew,slices, full_cov=False):
         """Internal helper function for making predictions, does not account for normalisation"""
-        """
-        Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)
-        mu = np.dot(np.dot(Kx.T,self.Ki),self.Y)
-        KiKx = np.dot(self.Ki,Kx)
+        K_x = self.kern.K(self.X,_Xnew,slices1=self.Xslices,slices2=slices)
+        aux2 = mdot(self.Bi,self.Sroot_tilde_K,self.v_tilde)
+        zeta = np.sqrt(self.tau_tilde)*aux2
+        f = np.dot(K_x.T,self.v_tilde-zeta)
+        v = mdot(self.Li,np.sqrt(self.tau_tilde)*K_x)
         if full_cov:
-            Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
-            var = Kxx - np.dot(KiKx.T,Kx)
+            Kxx = self.kern.K(_Xnew,slices1=slices,slices2=slices)
+            var = Kxx - np.dot(v.T,v)
+            var_diag = np.diag(var)[:,None]
         else:
             Kxx = self.kern.Kdiag(_Xnew, slices=slices)
-            var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
-        return mu, var
-        """
-        K_x = self.kern.K(self.X,_Xnew)
-        Kxx = self.kern.K(_Xnew)
-        #aux1,info = linalg.flapack.dtrtrs(self.L,np.dot(self.Sroot_tilde_K,self.ep_approx.v_tilde),lower=1)
-        #aux2,info = linalg.flapack.dtrtrs(self.L.T, aux1,lower=0)
-        #aux2 = mdot(self.Li.T,self.Li,self.Sroot_tilde_K,self.ep_approx.v_tilde)
-        aux2 = mdot(self.Bi,self.Sroot_tilde_K,self.ep_approx.v_tilde)
-        zeta = np.sqrt(self.ep_approx.tau_tilde)*aux2
-        f = np.dot(K_x.T,self.ep_approx.v_tilde-zeta)
-        #v,info = linalg.flapack.dtrtrs(self.L,np.sqrt(self.ep_approx.tau_tilde)[:,None]*K_x,lower=1)
-        v = mdot(self.Li,np.sqrt(self.ep_approx.tau_tilde)[:,None]*K_x)
-        variance = Kxx - np.dot(v.T,v)
-        vdiag = np.diag(variance)
-        y=self.likelihood.predictive_mean(f,vdiag)
-        return f,vdiag,y
+            var_diag = (Kxx - np.sum(v**2,-2))[:,None]
+        phi = self.likelihood.predictive_mean(f,var_diag)
+        return f, var_diag, phi

     def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None):
         """
@@ -257,7 +256,7 @@
         #gpplot(Xnew,m,v)
         mu_f, var_f, phi_f = self.predict(Xnew,slices=which_functions)
         pb.subplot(211)
-        self.likelihood.plot1Da(X_new=Xnew,Mean_new=mu_f,Var_new=var_f,X_u=self.X,Mean_u=self.mu,Var_u=np.diag(self.Sigma))
+        self.likelihood.plot1Da(X=Xnew,mean=mu_f,var=var_f,Z=self.X,mean_Z=self.mu,var_Z=np.diag(self.Sigma))
         if samples:
-            s = np.random.multivariate_normal(m.flatten(),v,samples)
+            s = np.random.multivariate_normal(mu_f.flatten(),np.diag(var_f.flatten()),samples)
             pb.plot(Xnew.flatten(),s.T, alpha = 0.4, c='#3465a4', linewidth = 0.8)
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index 5f824f2b..ca44aab1 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -11,3 +11,4 @@ from GP_EP2 import GP_EP2
 from generalized_FITC import generalized_FITC
 from sparse_GPLVM import sparse_GPLVM
 from uncollapsed_sparse_GP import uncollapsed_sparse_GP
+from GP import GP