mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-09 20:12:38 +02:00

[mrd] fixes for updates on psi2

parent 00f514b451
commit 79dd821424

2 changed files with 27 additions and 29 deletions
@@ -100,22 +100,8 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
                                   dL_dpsi2=full_values['dL_dpsi2'],
                                   psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)

-            kl_fctr = self.kl_factr
-
-            self.X.mean.gradient[:] = 0
-            self.X.variance.gradient[:] = 0
-            self.variational_prior.update_gradients_KL(self.X)
-
-            if self.missing_data or not self.stochastics:
-                self.X.mean.gradient = kl_fctr*self.X.mean.gradient
-                self.X.variance.gradient = kl_fctr*self.X.variance.gradient
-            else:
-                d = self.output_dim
-                self.X.mean.gradient = kl_fctr*self.X.mean.gradient*self.stochastics.batchsize/d
-                self.X.variance.gradient = kl_fctr*self.X.variance.gradient*self.stochastics.batchsize/d
-            self.X.mean.gradient += meangrad_tmp
-            self.X.variance.gradient += vargrad_tmp
-
+            self.X.mean.gradient = meangrad_tmp
+            self.X.variance.gradient = vargrad_tmp
         else:
             self.X.gradient = self.kern.gradients_X(full_values['dL_dKnm'], self.X, self.Z)
             self.X.gradient += self.kern.gradients_X_diag(full_values['dL_dKdiag'], self.X)
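The hunk above removes the KL bookkeeping from the inner gradient update (it moves into parameters_changed below). Its stochastic branch scales the KL gradient by batchsize/d, with d = self.output_dim, so that the KL term is counted exactly once per full pass over the output dimensions. A minimal sketch of that accounting, with made-up output_dim and batchsize values (plain Python, not GPy code):

    output_dim = 10   # hypothetical number of output dimensions, d
    batchsize = 2     # hypothetical minibatch size over those dimensions
    kl_factr = 1.0    # weight of the KL term

    n_batches = output_dim // batchsize            # batches in one full pass
    per_batch = kl_factr * batchsize / output_dim  # KL weight applied per batch

    # Summed over one full pass, the per-batch KL weight recovers kl_factr once.
    assert abs(n_batches * per_batch - kl_factr) < 1e-12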
@@ -126,15 +112,27 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):

     def parameters_changed(self):
         super(BayesianGPLVMMiniBatch,self).parameters_changed()
-        kl_fctr = self.kl_factr
-        if self.missing_data or not self.stochastics:
-            self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
-        elif self.stochastics:
-            d = self.output_dim
-            self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)*self.stochastics.batchsize/d
+        kl_fctr = self.kl_factr
+        if kl_fctr > 0:
+            Xgrad = self.X.gradient.copy()
+            self.X.gradient[:] = 0
+            self.variational_prior.update_gradients_KL(self.X)
+
+            if self.missing_data or not self.stochastics:
+                self.X.mean.gradient = kl_fctr*self.X.mean.gradient
+                self.X.variance.gradient = kl_fctr*self.X.variance.gradient
+            else:
+                d = self.output_dim
+                self.X.mean.gradient = kl_fctr*self.X.mean.gradient*self.stochastics.batchsize/d
+                self.X.variance.gradient = kl_fctr*self.X.variance.gradient*self.stochastics.batchsize/d
+            self.X.gradient += Xgrad

-        if isinstance(self.inference_method, VarDTC_minibatch):
-            return
+            if self.missing_data or not self.stochastics:
+                self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
+            elif self.stochastics:
+                d = self.output_dim
+                self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)*self.stochastics.batchsize/d

     def plot_latent(self, labels=None, which_indices=None,
                     resolution=50, ax=None, marker='o', s=40,
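The new parameters_changed applies the KL term with a save/zero/accumulate pattern: stash the current likelihood gradient (Xgrad), zero the buffer, let the variational prior write its KL gradient into it, scale that by kl_fctr (and by batchsize/d in the stochastic case), then add the stash back. The copy/zero dance suggests update_gradients_KL accumulates into the gradient buffer rather than overwriting it. A minimal sketch of the pattern, where Param, add_scaled_kl_gradient, and toy_kl_grad are illustrative stand-ins, not GPy API:

    import numpy as np

    class Param:
        """Stand-in for a parameter with an accumulating .gradient buffer."""
        def __init__(self, values):
            self.values = np.asarray(values, dtype=float)
            self.gradient = np.zeros_like(self.values)

    def add_scaled_kl_gradient(param, kl_grad_fn, kl_factr, scale=1.0):
        saved = param.gradient.copy()       # likelihood part, like Xgrad above
        param.gradient[:] = 0               # isolate the KL contribution
        kl_grad_fn(param)                   # accumulates dKL/dparam into .gradient
        param.gradient *= kl_factr * scale  # scale = batchsize/d when stochastic
        param.gradient += saved             # recombine, like X.gradient += Xgrad

    def toy_kl_grad(p):
        p.gradient += p.values              # gradient of the toy KL 0.5*||x||^2

    p = Param([1.0, -2.0])
    p.gradient[:] = [0.3, 0.3]              # pretend likelihood gradient
    add_scaled_kl_gradient(p, toy_kl_grad, kl_factr=1.0, scale=0.5)
    assert np.allclose(p.gradient, [0.8, -0.7])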
@@ -170,14 +170,14 @@ class MRD(BayesianGPLVMMiniBatch):
             self._log_marginal_likelihood += b._log_marginal_likelihood

             self.logger.info('working on im <{}>'.format(hex(id(i))))
-            self.Z.gradient[:] += b.full_values['Zgrad']
-            grad_dict = b.full_values
+            self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
+            #grad_dict = b.full_values

             if self.has_uncertain_inputs():
-                self.X.mean.gradient += grad_dict['meangrad']
-                self.X.variance.gradient += grad_dict['vargrad']
+                self.X.mean.gradient += b.X.mean.gradient
+                self.X.variance.gradient += b.X.variance.gradient
             else:
-                self.X.gradient += grad_dict['Xgrad']
+                self.X.gradient += b.X.gradient

         if self.has_uncertain_inputs():
             # update for the KL divergence
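In the MRD hunk above, each view b now contributes its own parameter gradients (b.Z.gradient, b.X.mean.gradient, b.X.variance.gradient, or b.X.gradient) to the shared latent space, instead of reading them from the cached grad_dict = b.full_values. A minimal sketch of that per-view accumulation, where View is an illustrative stand-in for the per-view sub-models, not the GPy class:

    import numpy as np

    class View:
        """Stand-in for one MRD view holding its own latent gradients."""
        def __init__(self, mean_grad, var_grad):
            self.X_mean_gradient = np.asarray(mean_grad, dtype=float)
            self.X_variance_gradient = np.asarray(var_grad, dtype=float)

    views = [View([1.0, 0.0], [0.5, 0.5]), View([0.0, 2.0], [0.25, 0.25])]

    # Mirrors the loop over models above: sum each view's contribution into
    # the shared latent-space gradients.
    mean_grad = np.zeros(2)   # like self.X.mean.gradient
    var_grad = np.zeros(2)    # like self.X.variance.gradient
    for b in views:
        mean_grad += b.X_mean_gradient
        var_grad += b.X_variance_gradient

    assert np.allclose(mean_grad, [1.0, 2.0])
    assert np.allclose(var_grad, [0.75, 0.75])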