merged variational posterior changes

Max Zwiessele 2014-02-26 08:30:27 +00:00
commit d29fa56af2
43 changed files with 1424 additions and 1936 deletions

@@ -45,34 +45,47 @@ class SparseGP(GP):
         self.Z = Param('inducing inputs', Z)
         self.num_inducing = Z.shape[0]
-        if not (X_variance is None):
-            self.q = NormalPosterior(X, X_variance)
-            GP.__init__(self, self.q.mean, Y, kernel, likelihood, inference_method=inference_method, name=name)
-        else:
-            self.X = X
-            GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name)
+        GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name)
         self.add_parameter(self.Z, index=0)
         self.parameters_changed()

     def has_uncertain_inputs(self):
-        return hasattr(self, 'q')
+        return isinstance(self.X, VariationalPosterior)
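
The two chunks above replace attribute-based bookkeeping with type-based dispatch: instead of wrapping X into a separate self.q and branching on X_variance in the constructor, the caller now passes either a plain design matrix or a variational posterior object as X, and uncertain inputs are detected from its type. A minimal self-contained sketch of that pattern (the classes below are illustrative stand-ins, not GPy's actual API):

    import numpy as np

    class VariationalPosterior(object):
        """Stand-in for a variational input distribution q(X)."""
        def __init__(self, mean, variance):
            self.mean = np.asarray(mean)
            self.variance = np.asarray(variance)

    class SparseModel(object):
        def __init__(self, X):
            # X is either a plain array or a VariationalPosterior;
            # one constructor path replaces the old X_variance branching.
            self.X = X

        def has_uncertain_inputs(self):
            # type check replaces the old hasattr(self, 'q') flag
            return isinstance(self.X, VariationalPosterior)

    X = np.random.randn(20, 3)
    assert not SparseModel(X).has_uncertain_inputs()
    assert SparseModel(VariationalPosterior(X, np.ones_like(X))).has_uncertain_inputs()
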
     def parameters_changed(self):
-        if self.has_uncertain_inputs():
-            self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference_latent(self.kern, self.q, self.Z, self.likelihood, self.Y)
-            # gradients
-            self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood'))
-            self.kern.update_gradients_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
-            self.Z.gradient = self.kern.gradients_Z_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
+        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y)
+        self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood'))
+        if isinstance(self.X, VariationalPosterior):
+            #gradients wrt kernel
+            dL_dKmm = self.grad_dict.pop('dL_dKmm')
+            self.kern.update_gradients_full(dL_dKmm, self.Z, None)
+            target = np.zeros(self.kern.size)
+            self.kern._collect_gradient(target)
+            self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict)
+            self.kern._collect_gradient(target)
+            self.kern._set_gradient(target)
+            #gradients wrt Z
+            self.Z.gradient = self.kern.gradients_X(dL_dKmm, self.Z)
+            self.Z.gradient += self.kern.gradients_Z_expectations(
+                self.grad_dict['dL_dpsi1'], self.grad_dict['dL_dpsi2'], Z=self.Z, variational_posterior=self.X)
         else:
-            self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, None, self.Z, self.likelihood, self.Y)
-            # gradients
-            self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood'))
-            self.kern.update_gradients_sparse(X=self.X, Z=self.Z, **self.grad_dict)
-            self.Z.gradient = self.kern.gradients_Z_sparse(X=self.X, Z=self.Z, **self.grad_dict)
+            #gradients wrt kernel
+            target = np.zeros(self.kern.size)
+            self.kern.update_gradients_diag(self.grad_dict['dL_dKdiag'], self.X)
+            self.kern._collect_gradient(target)
+            self.kern.update_gradients_full(self.grad_dict['dL_dKnm'], self.X, self.Z)
+            self.kern._collect_gradient(target)
+            self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z, None)
+            self.kern._collect_gradient(target)
+            self.kern._set_gradient(target)
+            #gradients wrt Z
+            self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z)
+            self.Z.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X)
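
Both new branches above share the same accumulation idiom: each kern.update_gradients_* call overwrites the kernel's gradient buffer, so the per-term contributions (dL_dKdiag, dL_dKnm, dL_dKmm, or the psi-statistic expectations) are summed into a temporary target after every call and written back once via _set_gradient. A toy sketch of that idiom (ToyKern is hypothetical; only the collect/set names mirror the diff):

    import numpy as np

    class ToyKern(object):
        size = 2  # number of kernel hyperparameters

        def __init__(self):
            self.gradient = np.zeros(self.size)

        def update_gradients_term(self, contribution):
            # like an update_gradients_* call: REPLACES the buffer, does not add
            self.gradient = np.asarray(contribution, dtype=float)

        def _collect_gradient(self, target):
            target += self.gradient        # accumulate into an external buffer

        def _set_gradient(self, target):
            self.gradient = target.copy()  # write the summed gradient back

    kern = ToyKern()
    target = np.zeros(kern.size)
    for term in ([1.0, 0.5], [0.25, 0.0], [0.0, 2.0]):  # e.g. Kdiag, Knm, Kmm terms
        kern.update_gradients_term(term)
        kern._collect_gradient(target)
    kern._set_gradient(target)
    assert np.allclose(kern.gradient, [1.25, 2.5])
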
     def _raw_predict(self, Xnew, X_variance_new=None, full_cov=False):
         """
         Make a prediction for the latent function values