integrated sparse GP regression and BGPLVM classes

Nicolo Fusi 2013-01-30 15:51:36 +00:00
parent 079a20620a
commit 2493e2d336
3 changed files with 25 additions and 23 deletions


@@ -32,7 +32,7 @@ class rbf(kernpart):
     def __init__(self,D,variance=1.,lengthscale=None,ARD=False):
         self.D = D
         self.ARD = ARD
-        if ARD == False:
+        if not ARD:
             self.Nparam = 2
             self.name = 'rbf'
             if lengthscale is not None:


@@ -25,12 +25,12 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
         S = np.ones_like(X) * 1e-2#
         sparse_GP_regression.__init__(self, X, Y, X_uncertainty = S, **kwargs)

-    def get_param_names(self):
+    def _get_param_names(self):
         X_names = sum([['X_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
         S_names = sum([['S_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
-        return (X_names + S_names + sparse_GP_regression.get_param_names(self))
+        return (X_names + S_names + sparse_GP_regression._get_param_names(self))

-    def get_param(self):
+    def _get_params(self):
         """
         Horizontally stacks the parameters in order to present them to the optimizer.
         The resulting 1-D array has this structure:
@@ -40,13 +40,13 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
         ===============================================================
         """
-        return np.hstack((self.X.flatten(), self.X_uncertainty.flatten(), sparse_GP_regression.get_param(self)))
+        return np.hstack((self.X.flatten(), self.X_uncertainty.flatten(), sparse_GP_regression._get_params(self)))

-    def set_param(self,x):
+    def _set_params(self,x):
         N, Q = self.N, self.Q
         self.X = x[:self.X.size].reshape(N,Q).copy()
         self.X_uncertainty = x[(N*Q):(2*N*Q)].reshape(N,Q).copy()
-        sparse_GP_regression.set_param(self, x[(2*N*Q):])
+        sparse_GP_regression._set_params(self, x[(2*N*Q):])

     def dL_dmuS(self):
         dL_dmu_psi0, dL_dS_psi0 = self.kern.dpsi1_dmuS(self.dL_dpsi1,self.Z,self.X,self.X_uncertainty)
@@ -57,6 +57,6 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
         return np.hstack((dL_dmu.flatten(), dL_dS.flatten()))

-    def log_likelihood_gradients(self):
+    def _log_likelihood_gradients(self):
         return np.hstack((self.dL_dmuS().flatten(), sparse_GP_regression.log_likelihood_gradients(self)))
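The packing convention above (latent means X first, then the uncertainties S, then the underlying sparse-GP parameters) must round-trip through _get_params and _set_params. A minimal standalone NumPy sketch of that round trip, with made-up array sizes standing in for the real model:

    import numpy as np

    # Hypothetical sizes, for illustration only.
    N, Q, n_gp = 4, 2, 5
    X = np.random.randn(N, Q)            # latent means
    S = np.ones((N, Q)) * 1e-2           # latent uncertainties
    gp_params = np.random.randn(n_gp)    # stands in for sparse_GP_regression._get_params()

    # pack, mirroring Bayesian_GPLVM._get_params
    p = np.hstack((X.flatten(), S.flatten(), gp_params))

    # unpack, mirroring Bayesian_GPLVM._set_params
    X2 = p[:N * Q].reshape(N, Q).copy()
    S2 = p[(N * Q):(2 * N * Q)].reshape(N, Q).copy()
    gp2 = p[(2 * N * Q):]

    assert np.allclose(X, X2) and np.allclose(S, S2) and np.allclose(gp_params, gp2)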


@@ -107,6 +107,20 @@ class sparse_GP_regression(GP_regression):
         self.dL_dKmm += -0.5 * self.D * (- self.C/sf2 - 2.*mdot(self.C, self.psi2_beta_scaled, self.Kmmi) + self.Kmmi) # dC
         self.dL_dKmm += np.dot(np.dot(self.E*sf2, self.psi2_beta_scaled) - np.dot(self.C, self.psi1VVpsi1), self.Kmmi) + 0.5*self.E # dD

+    def _set_params(self, p):
+        self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
+        self.beta = p[self.M*self.Q]
+        self.kern._set_params(p[self.Z.size + 1:])
+        self._computations()
+
+    def _get_params(self):
+        return np.hstack([self.Z.flatten(),self.beta,self.kern._get_params_transformed()])
+
+    def _get_param_names(self):
+        return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern._get_param_names_transformed()
+
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         sf2 = self.scale_factor**2
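The new underscore-prefixed interface fixes a single layout for the flat parameter vector: the M*Q inducing inputs Z come first, then the noise precision beta, then the transformed kernel parameters. A standalone sketch of that layout and of the matching name generation, with hypothetical sizes and with 'kern_%i' standing in for the kernel's own transformed names:

    import numpy as np

    # Hypothetical sizes, for illustration only.
    M, Q = 3, 2
    Z = np.random.randn(M, Q)             # inducing inputs
    beta = 100.0                          # noise precision
    kern_params = np.array([1.0, 0.5])    # stands in for kern._get_params_transformed()

    # flat vector, mirroring sparse_GP_regression._get_params
    p = np.hstack([Z.flatten(), beta, kern_params])

    # names, mirroring _get_param_names: one 'iip_i_j' per inducing-input entry
    names = sum([['iip_%i_%i' % (i, j) for i in range(M)] for j in range(Q)], []) \
            + ['noise_precision'] + ['kern_%i' % k for k in range(kern_params.size)]
    assert len(names) == p.size

    # unpack, mirroring _set_params
    Z2 = p[:M * Q].reshape(M, Q)
    beta2 = p[M * Q]
    kern2 = p[Z2.size + 1:]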
@@ -116,18 +130,9 @@ class sparse_GP_regression(GP_regression):
         D = +0.5*np.sum(self.psi1VVpsi1 * self.C)
         return A+B+C+D

-    def set_param(self, p):
-        self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
-        self.beta = p[self.M*self.Q]
-        self.kern.set_param(p[self.Z.size + 1:])
-        self._computations()
-
-    def get_param(self):
-        return np.hstack([self.Z.flatten(),self.beta,self.kern.extract_param()])
-
-    def get_param_names(self):
-        return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern.extract_param_names()
+    def _log_likelihood_gradients(self):
+        return np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()])

     def dL_dbeta(self):
         """
         Compute the gradient of the log likelihood wrt beta.
@@ -172,9 +177,6 @@ class sparse_GP_regression(GP_regression):
         dL_dZ += self.kern.dK_dX(dL_dpsi1,self.Z,self.X)
         return dL_dZ

-    def log_likelihood_gradients(self):
-        return np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()])
-
     def _raw_predict(self, Xnew, slices, full_cov=False):
         """Internal helper function for making predictions, does not account for normalisation"""