From 2493e2d336ea3a93a105565579932c008fdae8f0 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Wed, 30 Jan 2013 15:51:36 +0000
Subject: [PATCH] integrated sparse GP regression and BGPLVM classes

---
 GPy/kern/rbf.py                    |  2 +-
 GPy/models/BGPLVM.py               | 16 ++++++++--------
 GPy/models/sparse_GP_regression.py | 32 +++++++++++++++---------------
 3 files changed, 26 insertions(+), 24 deletions(-)

diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py
index 208a9209..a7c52180 100644
--- a/GPy/kern/rbf.py
+++ b/GPy/kern/rbf.py
@@ -32,7 +32,7 @@ class rbf(kernpart):
     def __init__(self,D,variance=1.,lengthscale=None,ARD=False):
         self.D = D
         self.ARD = ARD
-        if ARD == False:
+        if not ARD:
             self.Nparam = 2
             self.name = 'rbf'
             if lengthscale is not None:
diff --git a/GPy/models/BGPLVM.py b/GPy/models/BGPLVM.py
index 3fc257e9..05dad318 100644
--- a/GPy/models/BGPLVM.py
+++ b/GPy/models/BGPLVM.py
@@ -25,12 +25,12 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
             S = np.ones_like(X) * 1e-2#
         sparse_GP_regression.__init__(self, X, Y, X_uncertainty = S, **kwargs)
 
-    def get_param_names(self):
+    def _get_param_names(self):
         X_names = sum([['X_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
         S_names = sum([['S_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
-        return (X_names + S_names + sparse_GP_regression.get_param_names(self))
+        return (X_names + S_names + sparse_GP_regression._get_param_names(self))
 
-    def get_param(self):
+    def _get_params(self):
         """
         Horizontally stacks the parameters in order to present them to the optimizer.
         The resulting 1-D array has this structure:
@@ -40,13 +40,13 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
         ===============================================================
 
         """
-        return np.hstack((self.X.flatten(), self.X_uncertainty.flatten(), sparse_GP_regression.get_param(self)))
+        return np.hstack((self.X.flatten(), self.X_uncertainty.flatten(), sparse_GP_regression._get_params(self)))
 
-    def set_param(self,x):
+    def _set_params(self,x):
         N, Q = self.N, self.Q
         self.X = x[:self.X.size].reshape(N,Q).copy()
         self.X_uncertainty = x[(N*Q):(2*N*Q)].reshape(N,Q).copy()
-        sparse_GP_regression.set_param(self, x[(2*N*Q):])
+        sparse_GP_regression._set_params(self, x[(2*N*Q):])
 
     def dL_dmuS(self):
         dL_dmu_psi0, dL_dS_psi0 = self.kern.dpsi1_dmuS(self.dL_dpsi1,self.Z,self.X,self.X_uncertainty)
@@ -57,6 +57,6 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
 
         return np.hstack((dL_dmu.flatten(), dL_dS.flatten()))
 
-    def log_likelihood_gradients(self):
-        return np.hstack((self.dL_dmuS().flatten(), sparse_GP_regression.log_likelihood_gradients(self)))
+    def _log_likelihood_gradients(self):
+        return np.hstack((self.dL_dmuS().flatten(), sparse_GP_regression._log_likelihood_gradients(self)))
 
diff --git a/GPy/models/sparse_GP_regression.py b/GPy/models/sparse_GP_regression.py
index 9d9649ad..07ce4d97 100644
--- a/GPy/models/sparse_GP_regression.py
+++ b/GPy/models/sparse_GP_regression.py
@@ -107,6 +107,20 @@ class sparse_GP_regression(GP_regression):
         self.dL_dKmm += -0.5 * self.D * (- self.C/sf2 - 2.*mdot(self.C, self.psi2_beta_scaled, self.Kmmi) + self.Kmmi) # dC
         self.dL_dKmm += np.dot(np.dot(self.E*sf2, self.psi2_beta_scaled) - np.dot(self.C, self.psi1VVpsi1), self.Kmmi) + 0.5*self.E # dD
 
+
+    def _set_params(self, p):
+        self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
+        self.beta = p[self.M*self.Q]
+        self.kern._set_params(p[self.Z.size + 1:])
+        self._computations()
+
+    def _get_params(self):
+        return np.hstack([self.Z.flatten(),self.beta,self.kern._get_params_transformed()])
+
+    def _get_param_names(self):
+        return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern._get_param_names_transformed()
+
+
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         sf2 = self.scale_factor**2
@@ -116,18 +130,9 @@ class sparse_GP_regression(GP_regression):
         D = +0.5*np.sum(self.psi1VVpsi1 * self.C)
         return A+B+C+D
 
-    def set_param(self, p):
-        self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
-        self.beta = p[self.M*self.Q]
-        self.kern.set_param(p[self.Z.size + 1:])
-        self._computations()
-
-    def get_param(self):
-        return np.hstack([self.Z.flatten(),self.beta,self.kern.extract_param()])
-
-    def get_param_names(self):
-        return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern.extract_param_names()
-
+    def _log_likelihood_gradients(self):
+        return np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()])
+
     def dL_dbeta(self):
         """
         Compute the gradient of the log likelihood wrt beta.
@@ -172,9 +177,6 @@ class sparse_GP_regression(GP_regression):
         dL_dZ += self.kern.dK_dX(dL_dpsi1,self.Z,self.X)
         return dL_dZ
 
-    def log_likelihood_gradients(self):
-        return np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()])
-
     def _raw_predict(self, Xnew, slices, full_cov=False):
         """Internal helper function for making predictions, does not account for normalisation"""
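
Note (illustration only, not part of the patch): the renamed _get_params/_set_params
accessors present every parameter to the optimizer as one flat vector. A minimal
sketch of the stacking order documented in Bayesian_GPLVM._get_params, i.e.
[X, X_uncertainty, Z, beta, kernel params]. The sizes N, Q, M and the toy values
are hypothetical stand-ins for self.N, self.Q, self.M, and the kernel is assumed
to expose two parameters (e.g. rbf variance and lengthscale):

    import numpy as np

    N, Q, M = 4, 2, 3                  # data points, latent dims, inducing inputs (toy sizes)
    X = np.random.randn(N, Q)          # latent means (self.X)
    S = np.ones((N, Q)) * 1e-2         # latent variances (self.X_uncertainty)
    Z = np.random.randn(M, Q)          # inducing inputs (self.Z)
    beta = 100.0                       # noise precision (self.beta)
    theta = np.array([1.0, 1.0])       # kernel parameters

    # _get_params: hstack everything into one flat vector, in this order.
    p = np.hstack([X.flatten(), S.flatten(), Z.flatten(), beta, theta])

    # _set_params: unpack in the same order. Bayesian_GPLVM peels off X and S,
    # then hands the remainder to sparse_GP_regression._set_params.
    X2 = p[:N*Q].reshape(N, Q)
    S2 = p[N*Q:2*N*Q].reshape(N, Q)
    rest = p[2*N*Q:]
    Z2 = rest[:M*Q].reshape(M, Q)      # inducing inputs come first
    beta2 = rest[M*Q]                  # then the scalar noise precision
    theta2 = rest[M*Q+1:]              # then the kernel parameters

    assert np.allclose(X2, X) and np.allclose(Z2, Z) and beta2 == beta

Keeping the layout identical on the get and set sides is what lets
Bayesian_GPLVM._log_likelihood_gradients simply concatenate its own gradients
with those returned by the sparse GP in the same order.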