From 29ec128c9d6620b20989c9bdb27de95c098927ef Mon Sep 17 00:00:00 2001
From: Ricardo Andrade
Date: Mon, 28 Jan 2013 17:47:08 +0000
Subject: [PATCH] Refactor EP: fit_EP now returns (tau_tilde, mu_tilde, Z_ep); fold the EP site precisions into K as beta; move EP plotting into the likelihood classes.

---
 GPy/examples/ep_fix.py        |  12 ++--
 GPy/examples/poisson.py       |   2 +-
 GPy/examples/sparse_ep_fix.py |  34 +--------
 GPy/inference/EP.py           |   9 ++-
 GPy/inference/likelihoods.py  |  32 ++++++++-
 GPy/models/GP.py              |  92 +++++++++++--------------
 GPy/models/sparse_GP.py       | 126 +++++++++++++++++++++-----------
 7 files changed, 164 insertions(+), 143 deletions(-)

diff --git a/GPy/examples/ep_fix.py b/GPy/examples/ep_fix.py
index 9b35b3ff..c4e025dd 100644
--- a/GPy/examples/ep_fix.py
+++ b/GPy/examples/ep_fix.py
@@ -11,11 +11,9 @@ import GPy
 pb.ion()
 pb.close('all')
 
-default_seed=10000
 model_type='Full'
 inducing=4
-seed=default_seed
 
 """Simple 1D classification example.
 :param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
 :param seed : seed value for data generation (default is 4).
@@ -23,21 +21,19 @@ seed=default_seed
 :param inducing : number of inducing variables (only used for 'FITC' or 'DTC').
 :type inducing: int
 """
-data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
+data = GPy.util.datasets.toy_linear_1d_classification(seed=0)
 likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])
 m = GPy.models.GP(data['X'],likelihood=likelihood)
-#m = GPy.models.GP(data['X'],Y=likelihood.Y)
+#m = GPy.models.GP(data['X'],likelihood.Y)
 
-m.constrain_positive('var')
-m.constrain_positive('len')
-m.tie_param('lengthscale')
+m.ensure_default_constraints()
 if not isinstance(m.likelihood,GPy.inference.likelihoods.gaussian):
     m.approximate_likelihood()
 print m.checkgrad()
 
 # Optimize and plot
 m.optimize()
 #m.em(plot_all=False) # EM algorithm
-m.plot()
+m.plot(samples=3)
 print(m)
diff --git a/GPy/examples/poisson.py b/GPy/examples/poisson.py
index 5a1cc6af..71d80b30 100644
--- a/GPy/examples/poisson.py
+++ b/GPy/examples/poisson.py
@@ -31,7 +31,7 @@ Y = F + E
 pb.plot(X,F,'k-')
 pb.plot(X,Y,'ro')
 pb.figure()
-likelihood = GPy.inference.likelihoods.poisson(Y,scale=4.)
+likelihood = GPy.inference.likelihoods.poisson(Y,scale=6.)
 m = GPy.models.GP(X,likelihood=likelihood)
 #m = GPy.models.GP(data['X'],Y=likelihood.Y)
 
diff --git a/GPy/examples/sparse_ep_fix.py b/GPy/examples/sparse_ep_fix.py
index 738a82e6..7e3f1fc3 100644
--- a/GPy/examples/sparse_ep_fix.py
+++ b/GPy/examples/sparse_ep_fix.py
@@ -31,46 +31,18 @@ noise = GPy.kern.white(1)
 kernel = rbf + noise
 
 # create simple GP model
-#m1 = GPy.models.sparse_GP_regression(X, Y, kernel, M=M)
-m1 = GPy.models.sparse_GP(X, kernel, M=M,likelihood= likelihood)
+#m1 = GPy.models.sparse_GP(X, Y, kernel, M=M)
+m1 = GPy.models.sparse_GP(X,Y=None, kernel=kernel, M=M,likelihood= likelihood)
+print m1.checkgrad()
 
 # contrain all parameters to be positive
 m1.constrain_positive('(variance|lengthscale|precision)')
 #m1.constrain_positive('(variance|lengthscale)')
 #m1.constrain_fixed('prec',10.)
 
-#check gradient FIXME unit test please
-m1.checkgrad()
 
 # optimize and plot
 m1.optimize('tnc', messages = 1)
 m1.plot()
 # print(m1)
-######################################
-## 2 dimensional example
-
-# # sample inputs and outputs
-# X = np.random.uniform(-3.,3.,(N,2))
-# Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(N,1)*0.05
-
-# # construct kernel
-# rbf = GPy.kern.rbf(2)
-# noise = GPy.kern.white(2)
-# kernel = rbf + noise
-
-# # create simple GP model
-# m2 = GPy.models.sparse_GP_regression(X,Y,kernel, M = 50)
-# create simple GP model
-
-# # contrain all parameters to be positive (but not inducing inputs)
-# m2.constrain_positive('(variance|lengthscale|precision)')
-
-# #check gradient FIXME unit test please
-# m2.checkgrad()
-
-# # optimize and plot
-# pb.figure()
-# m2.optimize('tnc', messages = 1)
-# m2.plot()
-# print(m2)
diff --git a/GPy/inference/EP.py b/GPy/inference/EP.py
index 751d5ca8..5d571888 100644
--- a/GPy/inference/EP.py
+++ b/GPy/inference/EP.py
@@ -110,7 +110,6 @@ class Full(EP):
                 self.Sigma = self.Sigma - Delta_tau/(1.+ Delta_tau*self.Sigma[i,i])*np.dot(si,si.T)
                 self.mu = np.dot(self.Sigma,self.v_tilde)
                 self.iterations += 1
-                print self.tau_tilde[i] #TODO erase me
             #Sigma recomptutation with Cholesky decompositon
             Sroot_tilde_K = np.sqrt(self.tau_tilde)[:,None]*(self.K)
             B = np.eye(self.N) + np.sqrt(self.tau_tilde)[None,:]*Sroot_tilde_K
@@ -122,7 +121,13 @@ class Full(EP):
             epsilon_np2 = sum((self.v_tilde-self.np2[-1])**2)/self.N
             self.np1.append(self.tau_tilde.copy())
             self.np2.append(self.v_tilde.copy())
-        return self.tau_tilde[:,None], self.v_tilde[:,None], self.Z_hat[:,None], self.tau_[:,None], self.v_[:,None]
+
+        #Variables to be called from GP
+        mu_tilde = self.v_tilde/self.tau_tilde #When calling EP, this variable is used instead of Y in the GP model
+        sigma_sum = 1./self.tau_ + 1./self.tau_tilde
+        mu_diff_2 = (self.v_/self.tau_ - mu_tilde)**2
+        Z_ep = np.sum(np.log(self.Z_hat)) + 0.5*np.sum(np.log(sigma_sum)) + 0.5*np.sum(mu_diff_2/sigma_sum) #Normalization constant
+        return self.tau_tilde[:,None], mu_tilde[:,None], Z_ep
 
 class DTC(EP):
     def fit_EP(self):
diff --git a/GPy/inference/likelihoods.py b/GPy/inference/likelihoods.py
index 7f5d9140..864afa57 100644
--- a/GPy/inference/likelihoods.py
+++ b/GPy/inference/likelihoods.py
@@ -21,6 +21,27 @@ class likelihood:
         self.location = location
         self.scale = scale
 
+    def plot1D(self,X,mean,var,Z=None,mean_Z=None,var_Z=None,samples=0):
+        """
+        Plot the predictive distribution of the GP model for 1-dimensional inputs
+
+        :param X: the points at which to make a prediction
+        :param mean: mean values at X
+        :param var: variance values at X
+        :param Z: set of points to be highlighted in the plot, i.e. inducing points
+        :param mean_Z: mean values at Z
+        :param var_Z: variance values at Z
+        :param samples: number of samples to plot
+        """
+        assert X.shape[1] == 1, 'Number of dimensions must be 1'
+        gpplot(X,mean,var.flatten())
+        if samples: #NOTE why don't we put samples as a parameter of gpplot
+            s = np.random.multivariate_normal(mean.flatten(),np.diag(var),samples)
+            pb.plot(X.flatten(),s.T, alpha = 0.4, c='#3465a4', linewidth = 0.8)
+        #pb.subplot(211)
+        #self.plot1Da(X,mean,var,Z,mean_Z,var_Z)
+
     def plot1Da(self,X,mean,var,Z=None,mean_Z=None,var_Z=None):
         """
         Plot the predictive distribution of the GP model for 1-dimensional inputs
@@ -33,13 +54,15 @@ class likelihood:
             pb.errorbar(Z.flatten(),mean_Z.flatten(),2*np.sqrt(var_Z.flatten()),fmt='r+')
             pb.plot(Z,mean_Z,'ro')
 
+    """
     def plot1Db(self,X_obs,X,phi,Z=None):
         assert X_obs.shape[1] == 1, 'Number of dimensions must be 1'
         gpplot(X,phi,np.zeros(X.shape[0]))
         pb.plot(X_obs,(self.Y+1)/2,'kx',mew=1.5)
         if Z is not None:
             pb.plot(Z,Z*0+.5,'r|',mew=1.5,markersize=12)
+    """
 
     def plot2D(self,X,X_new,F_new,U=None):
         """
         Predictive distribution of the fitted GP model for 2-dimensional inputs
@@ -98,7 +121,6 @@ class probit(likelihood):
         sigma2_hat = 1./tau_i - (phi/((tau_i**2+tau_i)*Z_hat))*(z+phi/Z_hat)
         return Z_hat, mu_hat, sigma2_hat
 
-
     def predictive_mean(self,mu,var):
         mu = mu.flatten()
         var = var.flatten()
@@ -107,6 +129,14 @@ class probit(likelihood):
     def _log_likelihood_gradients():
         raise NotImplementedError
 
+    def plot(self,X,phi,X_obs,Z=None):
+        assert X_obs.shape[1] == 1, 'Number of dimensions must be 1'
+        gpplot(X,phi,np.zeros(X.shape[0]))
+        pb.plot(X_obs,(self.Y+1)/2,'kx',mew=1.5)
+        if Z is not None:
+            pb.plot(Z,Z*0+.5,'r|',mew=1.5,markersize=12)
+        pb.ylim(-0.2,1.2)
+
 class poisson(likelihood):
     """
     Poisson likelihood
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
index ccfe95c7..3a9f6de8 100644
--- a/GPy/models/GP.py
+++ b/GPy/models/GP.py
@@ -24,13 +24,18 @@ class GP(model):
     :type normalize_Y: False|True
     :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing)
     :rtype: model object
+    :param likelihood: a GPy likelihood, defaults to gaussian
+    :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 1e-3
+    :param power_ep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
+    :type power_ep: list
 
     .. Note:: Multiple independent outputs are allowed using columns of Y
 
     """
+    #TODO: make beta parameter explicit
+    #TODO: when using EP, predict needs to return 3 values, otherwise it just needs 2. At the moment predict returns 3 values in any case.
 
-    def __init__(self,X,Y=None,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None,likelihood=None,epsilon_ep=1e-3,epsion_em=.1,power_ep=[1.,1.]):
-        #TODO: make beta parameter explicit
+    def __init__(self,X,Y=None,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None,likelihood=None,epsilon_ep=1e-3,epsilon_em=.1,power_ep=[1.,1.]):
 
         # parse arguments
         self.Xslices = Xslices
@@ -54,7 +59,6 @@ class GP(model):
             self._Xmean = np.zeros((1,self.X.shape[1]))
             self._Xstd = np.ones((1,self.X.shape[1]))
 
-
         # Y - likelihood related variables, these might change whether using EP or not
         if likelihood is None:
             assert Y is not None, "Either Y or likelihood must be defined"
@@ -68,8 +72,9 @@ class GP(model):
         if isinstance(self.likelihood,gaussian):
             self.EP = False
             self.Y = Y
+            self.beta = 100. #FIXME beta should be an explicit parameter for this model
 
-            #here's some simple normalisation
+            # Here's some simple normalisation
             if normalize_Y:
                 self._Ymean = Y.mean(0)[None,:]
                 self._Ystd = Y.std(0)[None,:]
@@ -89,50 +94,43 @@ class GP(model):
             self.EP = True
             self.eta,self.delta = power_ep
             self.epsilon_ep = epsilon_ep
-            self.tau_tilde = np.ones([self.N,self.D])
-            self.v_tilde = np.zeros([self.N,self.D])
-            self.tau_ = np.ones([self.N,self.D])
-            self.v_ = np.zeros([self.N,self.D])
-            self.Z_hat = np.ones([self.N,self.D])
+            self.beta = np.ones([self.N,self.D])
+            self.Z_ep = 0
+            self.Y = None
+            self._Ymean = np.zeros((1,self.D))
+            self._Ystd = np.ones((1,self.D))
 
         model.__init__(self)
 
     def _set_params(self,p):
-        # TODO: remove beta when using EP
+        # TODO: add beta when not using EP
         self.kern._set_params_transformed(p)
-        if not self.EP:
-            self.K = self.kern.K(self.X,slices1=self.Xslices)
-            self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
-        else:
-            self._ep_covariance()
+        self.K = self.kern.K(self.X,slices1=self.Xslices)
+        if self.EP:
+            self.K += np.diag(1./self.beta.flatten())
+        #else:
+        #    self.beta = p[-1]
+        self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
 
     def _get_params(self):
-        # TODO: remove beta when using EP
+        # TODO: add beta when not using EP
         return self.kern._get_params_transformed()
 
     def _get_param_names(self):
-        # TODO: remove beta when using EP
+        # TODO: add beta when not using EP
        return self.kern._get_param_names_transformed()
 
     def approximate_likelihood(self):
         assert not isinstance(self.likelihood, gaussian), "EP is only available for non-gaussian likelihoods"
-        self.ep_approx = Full(self.K,self.likelihood,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
-        self.tau_tilde, self.v_tilde, self.Z_hat, self.tau_, self.v_=self.ep_approx.fit_EP()
-        # Y: EP likelihood is defined as a regression model for mu_tilde
-        self.Y = self.v_tilde/self.tau_tilde
-        self._Ymean = np.zeros((1,self.Y.shape[1]))
-        self._Ystd = np.ones((1,self.Y.shape[1]))
+        self.ep_approx = Full(self.K,self.likelihood,epsilon = self.epsilon_ep,power_ep=[self.eta,self.delta])
+        self.beta, self.Y, self.Z_ep = self.ep_approx.fit_EP()
         if self.D > self.N:
             # then it's more efficient to store YYT
             self.YYT = np.dot(self.Y, self.Y.T)
         else:
             self.YYT = None
-        self.mu_ = self.v_/self.tau_
-        self._ep_covariance()
-
-    def _ep_covariance(self):
         # Kernel plus noise variance term
-        self.K = self.kern.K(self.X,slices1=self.Xslices) + np.diag(1./self.tau_tilde.flatten())
+        self.K = self.kern.K(self.X,slices1=self.Xslices) + np.diag(1./self.beta.flatten())
         self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
 
     def _model_fit_term(self):
@@ -144,25 +142,16 @@ class GP(model):
         else:
             return -0.5*np.sum(np.multiply(self.Ki, self.YYT))
 
-    def _normalization_term(self):
-        """
-        Computes the marginal likelihood normalization constants
-        """
-        sigma_sum = 1./self.tau_ + 1./self.tau_tilde
-        mu_diff_2 = (self.mu_ - self.Y)**2
-        penalty_term = np.sum(np.log(self.Z_hat))
-        return penalty_term + 0.5*np.sum(np.log(sigma_sum)) + 0.5*np.sum(mu_diff_2/sigma_sum)
-
     def log_likelihood(self):
         """
        The log marginal likelihood for an EP model can be written as the log likelihood of a regression model for a new variable Y* = v_tilde/tau_tilde, with a covariance matrix K* = K + diag(1./tau_tilde) plus a normalization term.
         """
-        complexity_term = -0.5*self.D*self.Kplus_logdet
-        normalization_term = 0 if self.EP == False else self.normalization_term()
-        return complexity_term + normalization_term + self._model_fit_term()
-
+        L = -0.5*self.D*self.K_logdet + self._model_fit_term()
+        if self.EP:
+            L += self.Z_ep
+        return L
 
     def log_likelihood(self):
         complexity_term = -0.5*self.N*self.D*np.log(2.*np.pi) - 0.5*self.D*self.K_logdet
@@ -174,7 +163,6 @@ class GP(model):
             dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
         else:
             dL_dK = 0.5*(mdot(self.Ki, self.YYT, self.Ki) - self.D*self.Ki)
-
         return dL_dK
 
     def _log_likelihood_gradients(self):
@@ -267,7 +255,7 @@ class GP(model):
             Y = self.Y[which_data,:]
 
             Xorig = X*self._Xstd + self._Xmean
-            Yorig = Y*self._Ystd + self._Ymean if not self.EP else self.likelihood.Y
+            Yorig = Y*self._Ystd + self._Ymean #NOTE For EP this is v_tilde/beta
 
             if plot_limits is None:
                 xmin,xmax = Xorig.min(0),Xorig.max(0)
@@ -282,19 +270,17 @@ class GP(model):
             m,v,phi = self.predict(Xnew,slices=which_functions)
             if self.EP:
                 pb.subplot(211)
             gpplot(Xnew,m,v)
-            if samples:
-                s = np.random.multivariate_normal(m.flatten(),v,samples)
-                pb.plot(Xnew.flatten(),s.T, alpha = 0.4, c='#3465a4', linewidth = 0.8)
-            if not self.EP:
-                pb.plot(Xorig,Yorig,'kx',mew=1.5)
-                pb.xlim(xmin,xmax)
-            else:
-                pb.xlim(xmin,xmax)
+            if samples: #NOTE why don't we put samples as a parameter of gpplot
+                s = np.random.multivariate_normal(m.flatten(),np.diag(v),samples)
+                pb.plot(Xnew.flatten(),s.T, alpha = 0.4, c='#3465a4', linewidth = 0.8)
+            pb.plot(Xorig,Yorig,'kx',mew=1.5)
+            pb.xlim(xmin,xmax)
+
+            if self.EP:
                 pb.subplot(212)
-                self.likelihood.plot1Db(self.X,Xnew,phi)
+                self.likelihood.plot(Xnew,phi,self.X)
                 pb.xlim(xmin,xmax)
 
         elif self.X.shape[1]==2:
diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py
index 1164a1af..655f6252 100644
--- a/GPy/models/sparse_GP.py
+++ b/GPy/models/sparse_GP.py
@@ -37,7 +37,7 @@ class sparse_GP(GP):
     :type normalize_(X|Y): bool
     """
 
-    def __init__(self,X,Y,kernel=None, X_uncertainty=None, beta=100., Z=None,Zslices=None,M=10,normalize_X=False,normalize_Y=False,likelihood=None,method_ep='DTC',epsilon_ep=1e-3,epsilon_em=.1,power_ep=[1.,1.]):
+    def __init__(self,X,Y=None,kernel=None, X_uncertainty=None, beta=100., Z=None,Zslices=None,M=10,normalize_X=False,normalize_Y=False,likelihood=None,method_ep='DTC',epsilon_ep=1e-3,epsilon_em=.1,power_ep=[1.,1.]):
 
         if Z is None:
             self.Z = np.random.permutation(X.copy())[:M]
@@ -53,10 +53,8 @@ class sparse_GP(GP):
             self.has_uncertain_inputs=True
             self.X_uncertainty = X_uncertainty
 
-
-        self.beta = beta #FIXME
-        GP.__init__(self, X, Y, kernel=kernel, normalize_X=normalize_X, normalize_Y=normalize_Y,likelihood=likelihood,epsilon_ep=epsilon_ep,epsion_em=epsilon_em,power_ep=power_ep)
-        self.beta = beta if isinstance(likelihood,gaussian) else self.tau_tilde #TODO this should be defined in GP.__init__
+        GP.__init__(self, X=X, Y=Y, kernel=kernel, normalize_X=normalize_X, normalize_Y=normalize_Y,likelihood=likelihood,epsilon_ep=epsilon_ep,epsilon_em=epsilon_em,power_ep=power_ep)
+        self.trYYT = np.sum(np.square(self.Y)) if not self.EP else None
 
         #normalise X uncertainty also
@@ -74,10 +72,55 @@ class sparse_GP(GP):
         else:
             self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
             self.kern._set_params(p[self.Z.size:])
-        #self._compute_kernel_matrices() this is replaced by _ep_covariance
-        self._ep_covariance()
+        #self._compute_kernel_matrices() this is replaced by _ep_kernel_matrices
+        self._ep_kernel_matrices()
         self._ep_computations()
 
+    def _compute_kernel_matrices(self):
+        # kernel computations, using BGPLVM notation
+        #TODO: slices for psi statistics (easy enough)
+
+        self.Kmm = self.kern.K(self.Z)
+        if self.has_uncertain_inputs:
+            if self.hetero_noise:
+                raise NotImplementedError, "uncertain inputs and het noise not yet supported"
+            else:
+                self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty).sum()
+                self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
+                self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty)
+        else:
+            if self.hetero_noise:
+                print "rick's stuff here"
+            else:
+                self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices).sum()
+                self.psi1 = self.kern.K(self.Z,self.X)
+                self.psi2 = np.dot(self.psi1,self.psi1.T)
+
+    def _computations(self):
+        # TODO find routine to multiply triangular matrices
+        self.V = self.beta*self.Y
+        self.psi1V = np.dot(self.psi1, self.V)
+        self.psi1VVpsi1 = np.dot(self.psi1V, self.psi1V.T)
+        self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
+        self.A = mdot(self.Lmi, self.beta*self.psi2, self.Lmi.T)
+        self.B = np.eye(self.M) + self.A
+        self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B)
+        self.LLambdai = np.dot(self.LBi, self.Lmi)
+        self.trace_K = self.psi0 - np.trace(self.A)/self.beta
+        self.LBL_inv = mdot(self.Lmi.T, self.Bi, self.Lmi)
+        self.C = mdot(self.LLambdai, self.psi1V)
+        self.G = mdot(self.LBL_inv, self.psi1VVpsi1, self.LBL_inv.T)
+
+        # Compute dL_dpsi
+        self.dL_dpsi0 = - 0.5 * self.D * self.beta * np.ones(self.N)
+        self.dL_dpsi1 = mdot(self.LLambdai.T,self.C,self.V.T)
+        self.dL_dpsi2 = - 0.5 * self.beta * (self.D*(self.LBL_inv - self.Kmmi) + self.G)
+
+        # Compute dL_dKmm
+        self.dL_dKmm = -0.5 * self.D * mdot(self.Lmi.T, self.A, self.Lmi) # dB
+        self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - 2.*self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi) + self.Kmmi) # dC
+        self.dL_dKmm += np.dot(np.dot(self.G,self.beta*self.psi2) - np.dot(self.LBL_inv, self.psi1VVpsi1), self.Kmmi) + 0.5*self.G # dE
+
     def approximate_likelihood(self):
         assert not isinstance(self.likelihood, gaussian), "EP is only available for non-gaussian likelihoods"
         if self.ep_proxy == 'DTC':
@@ -88,6 +131,22 @@ class sparse_GP(GP):
         else:
             self.ep_approx = Full(self.X,self.likelihood,self.kernel,inducing=None,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
         self.beta, self.v_tilde, self.Z_hat, self.tau_, self.v_=self.ep_approx.fit_EP()
+        self._ep_kernel_matrices()
+        self._computations()
+
+    def _ep_kernel_matrices(self):
+        self.Kmm = self.kern.K(self.Z)
+        if self.has_uncertain_inputs:
+            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty).sum()
+            self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
+            self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty) #FIXME include beta
+        else:
+            self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices)
+            self.psi1 = self.kern.K(self.Z,self.X)
+            self.psi2 = np.dot(self.psi1,self.psi1.T)
+            self.psi2_beta_scaled = np.dot(self.psi1,self.beta*self.psi1.T)
+
+    def _ep_computations(self):
         # Y: EP likelihood is defined as a regression model for mu_tilde
         self.Y = self.v_tilde/self.beta
         self._Ymean = np.zeros((1,self.Y.shape[1]))
@@ -99,50 +158,17 @@ class sparse_GP(GP):
         self._Ystd = np.ones((1,self.Y.shape[1]))
         if self.D > self.N:
             # then it's more efficient to store YYT
             self.YYT = np.dot(self.Y, self.Y.T)
         else:
             self.YYT = None
         self.mu_ = self.v_/self.tau_
-        self._ep_covariance()
-        self._computations()
-
-    def _ep_covariance(self):
-        self.Kmm = self.kern.K(self.Z)
-        if self.has_uncertain_inputs:
-            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty).sum()
-            self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
-            self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty) #FIXME include beta
-        else:
-            #self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices).sum()
-            self.Knn_diag = self.kern.Kdiag(self.X,slices=self.Xslices)
-            self.psi0 = (self.beta*self.Knn_diag).sum() #TODO check dimensions
-            self.psi1 = self.kern.K(self.Z,self.X)
-            #self.psi2 = np.dot(self.psi1,self.psi1.T)
-            self.psi2 = np.dot(self.psi1,self.beta*self.psi1.T)
-
-    def _compute_kernel_matrices(self):
-        # kernel computations, using BGPLVM notation
-        #TODO: slices for psi statistics (easy enough)
-
-        self.Kmm = self.kern.K(self.Z)
-        if self.has_uncertain_inputs:
-            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty).sum()
-            self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
-            self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty)
-        else:
-            self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices).sum()
-            self.psi1 = self.kern.K(self.Z,self.X)
-            self.psi2 = np.dot(self.psi1,self.psi1.T)
-
-    def _ep_computations(self):
         # TODO find routine to multiply triangular matrices
         self.V = self.beta*self.Y
         self.psi1V = np.dot(self.psi1, self.V)
         self.psi1VVpsi1 = np.dot(self.psi1V, self.psi1V.T)
         self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
         #self.A = mdot(self.Lmi, self.beta*self.psi2, self.Lmi.T)
-        self.A = mdot(self.Lmi, self.psi2, self.Lmi.T)
+        self.A = mdot(self.Lmi, self.psi2_beta_scaled, self.Lmi.T)
         self.B = np.eye(self.M) + self.A
         self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B)
         self.LLambdai = np.dot(self.LBi, self.Lmi)
-        #self.trace_K = self.psi0 - np.sum(np.dot(self.Lmi,self.psi1)**2,-1) #TODO check
-        self.trace_K = self.psi0 - np.trace(self.A)
+        self.trace_K = self.psi0.sum() - np.trace(self.A)
         self.LBL_inv = mdot(self.Lmi.T, self.Bi, self.Lmi)
         self.C = mdot(self.LLambdai, self.psi1V)
         self.G = mdot(self.LBL_inv, self.psi1VVpsi1, self.LBL_inv.T)
@@ -176,10 +202,15 @@ class sparse_GP(GP):
         Compute the (lower bound on the) log marginal likelihood
         """
         beta_logdet = self.N*self.D*np.log(self.beta) if not self.EP else self.D*np.sum(np.log(self.beta))
-        A = -0.5*self.N*self.D*(np.log(2.*np.pi)) - 0.5*beta_logdet
-        B = -0.5*self.beta*self.D*self.trace_K if not self.EP else -0.5*self.D*self.trace_K
+        if self.hetero_noise:
+            A = foo #FIXME placeholder, heteroscedastic-noise term still missing
+            B = bar #FIXME placeholder, heteroscedastic-noise term still missing
+            D = -0.5*self.trbetaYYT
+        else:
+            A = -0.5*self.N*self.D*(np.log(2.*np.pi)) - 0.5*beta_logdet
+            B = -0.5*self.beta*self.D*self.trace_K if not self.EP else -0.5*self.D*self.trace_K
+            D = -0.5*self.beta*self.trYYT
         C = -0.5*self.D * self.B_logdet
-        D = -0.5*self.beta*self.trYYT if not self.EP else -0.5*self.trbetaYYT
         E = +0.5*np.sum(self.psi1VVpsi1 * self.LBL_inv)
         return A+B+C+D+E
@@ -243,13 +274,14 @@ class sparse_GP(GP):
         noise_term = 1./self.beta if not self.EP else 0
         Kxx = self.kern.Kdiag(Xnew)
         var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.LBL_inv, Kx),0) + noise_term
-        return mu,var
+        return mu,var,None #TODO add phi for EP
 
     def plot(self, *args, **kwargs):
         """
         Plot the fitted model: just call the GP_regression plot function and then add inducing inputs
         """
-        GP_regression.plot(self,*args,**kwargs)
+        #GP_regression.plot(self,*args,**kwargs)
+        GP.plot(self,*args,**kwargs)
         if self.Q==1:
             pb.plot(self.Z,self.Z*0+pb.ylim()[0],'k|',mew=1.5,markersize=12)
         if self.has_uncertain_inputs:
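-- 

A minimal usage sketch of the refactored EP path (not part of the diff itself; based on GPy/examples/ep_fix.py above, and assuming the toy_linear_1d_classification helper and the probit likelihood on this branch):

    import GPy

    # Build a GP with a non-gaussian (probit) likelihood; Y lives inside
    # the likelihood object, so it is not passed to the model directly.
    data = GPy.util.datasets.toy_linear_1d_classification(seed=0)
    likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])
    m = GPy.models.GP(data['X'], likelihood=likelihood)
    m.ensure_default_constraints()

    # fit_EP now returns (tau_tilde, mu_tilde, Z_ep); approximate_likelihood
    # stores these as m.beta, m.Y and m.Z_ep, so the EP model is handled as
    # a regression model on mu_tilde with covariance K + diag(1./beta), and
    # log_likelihood() adds the Z_ep normalization constant.
    m.approximate_likelihood()

    m.optimize()
    m.plot(samples=3)  # subplot 211: latent GP; subplot 212: likelihood.plot()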