Renamed log_likelihood_gradients to _log_likelihood_gradients and moved extract_gradient to _log_like_gradients_transformed.

Neil Lawrence 2013-01-18 13:47:37 +00:00
parent 333d8d3e5f
commit b43db3f8da
9 changed files with 13 additions and 13 deletions
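
The pattern across all nine files is the same: the per-model gradient hook becomes a private method, and every caller (including subclasses) switches to the underscored name. A minimal sketch of the contract the rename moves toward (the base-class default and the toy subclass are illustrative, not from this commit):

    import numpy as np

    class model(object):
        # subclasses override these private hooks
        def _log_likelihood_gradients(self):
            raise NotImplementedError
        def _log_prior_gradients(self):
            return 0.0  # illustrative default: no priors set

    class toy_model(model):
        def __init__(self, theta):
            self.theta = np.asarray(theta, dtype=float)
        def _log_likelihood_gradients(self):
            return -self.theta  # gradient of a toy log-likelihood -0.5*theta**2

    m = toy_model([0.3, -1.2])
    print(m._log_likelihood_gradients() + m._log_prior_gradients())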


@@ -25,7 +25,7 @@ class model(parameterised):
         raise NotImplementedError, "this needs to be implemented to utilise the model class"
     def log_likelihood(self):
         raise NotImplementedError, "this needs to be implemented to utilise the model class"
-    def log_likelihood_gradients(self):
+    def _log_likelihood_gradients(self):
         raise NotImplementedError, "this needs to be implemented to utilise the model class"
     def set_prior(self,which,what):
@@ -108,7 +108,7 @@ class model(parameterised):
         """evaluate the prior"""
         return np.sum([p.lnpdf(x) for p, x in zip(self.priors,self._get_params()) if p is not None])
-    def log_prior_gradients(self):
+    def _log_prior_gradients(self):
         """evaluate the gradients of the priors"""
         x = self._get_params()
         ret = np.zeros(x.size)
@@ -120,7 +120,7 @@ class model(parameterised):
         Use self.log_likelihood_gradients and self.prior_gradients to get the gradients of the model.
         Adjust the gradient for constraints and ties, return.
         """
-        g = self.log_likelihood_gradients() + self.log_prior_gradients()
+        g = self._log_likelihood_gradients() + self._log_prior_gradients()
         x = self._get_params()
         g[self.constrained_positive_indices] = g[self.constrained_positive_indices]*x[self.constrained_positive_indices]
         g[self.constrained_negative_indices] = g[self.constrained_negative_indices]*x[self.constrained_negative_indices]
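
The `g * x` adjustments in this hunk are consistent with the usual chain-rule correction for parameters optimized in a transformed space: if a positive parameter is handled internally as s = log(x), then dL/ds = x * dL/dx. A standalone numeric check of that identity (toy objective, not GPy code):

    import numpy as np

    def L(x):
        return np.log(x) - x          # toy objective on a positive parameter

    def dL_dx(x):
        return 1.0 / x - 1.0

    x = 0.7
    s = np.log(x)                     # unconstrained internal representation
    eps = 1e-6
    # finite-difference gradient with respect to s = log(x)
    fd = (L(np.exp(s + eps)) - L(np.exp(s - eps))) / (2 * eps)
    print(fd, x * dL_dx(x))           # both ~0.3: dL/ds = x * dL/dx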


@@ -99,6 +99,6 @@ class probit(likelihood):
     def predictive_mean(self,mu,variance):
         return stats.norm.cdf(mu/np.sqrt(1+variance))
-    def log_likelihood_gradients():
+    def _log_likelihood_gradients():
         raise NotImplementedError


@@ -44,7 +44,7 @@ class GPLVM(GP_regression):
         self.X = x[:self.X.size].reshape(self.N,self.Q).copy()
         GP_regression._set_params(self, x[self.X.size:])
-    def log_likelihood_gradients(self):
+    def _log_likelihood_gradients(self):
         dL_dK = self.dL_dK()
         dL_dtheta = self.kern.dK_dtheta(dL_dK,self.X)


@@ -78,7 +78,7 @@ class GP_EP(model):
         L3 = sum(np.log(self.ep_approx.Z_hat))
         return L1 + L2A + L2B + L3
-    def log_likelihood_gradients(self):
+    def _log_likelihood_gradients(self):
         dK_dp = self.kernel.dK_dtheta(self.X)
         self.dK_dp = dK_dp
         aux1,info_1 = linalg.flapack.dtrtrs(self.L,np.dot(self.Sroot_tilde_K,self.ep_approx.v_tilde),lower=1)


@@ -103,7 +103,7 @@ class GP_regression(model):
         return dL_dK
-    def log_likelihood_gradients(self):
+    def _log_likelihood_gradients(self):
         return self.kern.dK_dtheta(partial=self.dL_dK(),X=self.X)
     def predict(self,Xnew, slices=None, full_cov=False):
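
In GP_regression the whole hyperparameter gradient factors through the kernel matrix, dL/dtheta = sum_ij (dL/dK_ij)(dK_ij/dtheta), which is the contraction that passing `partial=self.dL_dK()` into `dK_dtheta` performs. A standalone illustration with a toy one-parameter kernel (not GPy's API):

    import numpy as np

    X = np.linspace(0, 1, 4)[:, None]

    def K(theta):
        # toy RBF-style kernel with a single lengthscale-like parameter
        return np.exp(-theta * (X - X.T) ** 2)

    theta = 1.5
    dL_dK = np.ones((4, 4))            # stand-in for the model's dL/dK
    dK_dtheta = -(X - X.T) ** 2 * K(theta)
    grad = np.sum(dL_dK * dK_dtheta)   # the contraction dK_dtheta performs

    eps = 1e-6
    fd = (np.sum(K(theta + eps)) - np.sum(K(theta - eps))) / (2 * eps)
    print(grad, fd)                    # agree, since L = sum(K) when dL/dK = 1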


@@ -99,7 +99,7 @@ class generalized_FITC(model):
         E = .5*np.sum((self.ep_approx.v_/self.ep_approx.tau_ - self.mu_tilde.flatten())**2/(1./self.ep_approx.tau_ + 1./self.ep_approx.tau_tilde))
         return A + B + C + D + E
-    def log_likelihood_gradients(self):
+    def _log_likelihood_gradients(self):
         dKmm_dtheta = self.kernel.dK_dtheta(self.Z)
         dKnn_dtheta = self.kernel.dK_dtheta(self.X)
         dKmn_dtheta = self.kernel.dK_dtheta(self.Z,self.X)


@@ -49,8 +49,8 @@ class sparse_GPLVM(sparse_GP_regression, GPLVM):
         return dL_dX
-    def log_likelihood_gradients(self):
-        return np.hstack((self.dL_dX().flatten(), sparse_GP_regression.log_likelihood_gradients(self)))
+    def _log_likelihood_gradients(self):
+        return np.hstack((self.dL_dX().flatten(), sparse_GP_regression._log_likelihood_gradients(self)))
     def plot(self):
         GPLVM.plot(self)


@@ -168,7 +168,7 @@ class sparse_GP_regression(GP_regression):
         dL_dZ += self.kern.dK_dX(dL_dpsi1,self.Z,self.X)
         return dL_dZ
-    def log_likelihood_gradients(self):
+    def _log_likelihood_gradients(self):
         return np.hstack([self.dL_dZ().flatten(), self.dL_dbeta(), self.dL_dtheta()])
     def _raw_predict(self, Xnew, slices, full_cov=False):


@@ -59,8 +59,8 @@ class warpedGP(GP_regression):
         jacobian = self.warping_function.fgrad_y(self.Z, self.warping_params)
         return ll + np.log(jacobian).sum()
-    def log_likelihood_gradients(self):
-        ll_grads = GP_regression.log_likelihood_gradients(self)
+    def _log_likelihood_gradients(self):
+        ll_grads = GP_regression._log_likelihood_gradients(self)
         alpha = np.dot(self.Ki, self.Y.flatten())
         warping_grads = self.warping_function_gradients(alpha)
         return np.hstack((warping_grads.flatten(), ll_grads.flatten()))
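
sparse_GPLVM and warpedGP show the companion pattern for subclasses that add parameters: call the parent's renamed hook explicitly and hstack the extra gradients in front, so the ordering matches the parameter vector. A schematic of that pattern (class names and values illustrative, not from this commit):

    import numpy as np

    class parent(object):
        def _log_likelihood_gradients(self):
            return np.array([0.1, -0.2])   # gradients for the parent's parameters

    class child(parent):
        def _log_likelihood_gradients(self):
            extra = np.array([0.5])        # gradient for the child's extra parameter
            # child parameters first, then the parent's, mirroring warpedGP above
            return np.hstack((extra, parent._log_likelihood_gradients(self)))

    print(child()._log_likelihood_gradients())   # [ 0.5  0.1 -0.2]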