corrected caching for psi derivatives

This commit is contained in:
Alan Saul 2015-09-01 19:17:56 +03:00
parent 3818aa3745
commit 50b9e4dc82
4 changed files with 29 additions and 34 deletions

View file

@@ -9,6 +9,7 @@ from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_miniba
import logging
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
from GPy.core.parameterization.param import Param
from GPy.core.parameterization.observable_array import ObsAr
class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
"""
@@ -134,17 +135,6 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
full_values['Xgrad'] = self.kern.gradients_X(full_values['dL_dKnm'], self.X, self.Z)
full_values['Xgrad'] += self.kern.gradients_X_diag(full_values['dL_dKdiag'], self.X)
#kl_fctr = self.kl_factr
#if self.has_uncertain_inputs():
#self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
# Subsetting Variational Posterior objects, makes the gradients
# empty. We need them to be 0 though:
#self.X.mean.gradient[:] = 0
#self.X.variance.gradient[:] = 0
#self.variational_prior.update_gradients_KL(self.X)
if self.has_uncertain_inputs():
self.X.mean.gradient = full_values['meangrad']
self.X.variance.gradient = full_values['vargrad']
@@ -155,13 +145,15 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
full_values = super(BayesianGPLVMMiniBatch, self)._outer_init_full_values()
full_values['meangrad'] = np.zeros((self.X.shape[0], self.X.shape[1]))
full_values['vargrad'] = np.zeros((self.X.shape[0], self.X.shape[1]))
full_values['dL_dpsi0'] = np.zeros(self.X.shape[0])
full_values['dL_dpsi1'] = np.zeros((self.X.shape[0], self.Z.shape[0]))
full_values['dL_dpsi2'] = np.zeros((self.Z.shape[0], self.Z.shape[0]))
full_values['Lpsi0'] = np.zeros(self.X.shape[0])
full_values['Lpsi1'] = np.zeros((self.X.shape[0], self.Z.shape[0]))
full_values['Lpsi2'] = np.zeros((self.X.shape[0], self.Z.shape[0], self.Z.shape[0]))
#FIXME Hack
full_values['dL_dpsi0'] = ObsAr(np.zeros(self.X.shape[0]))
full_values['dL_dpsi1'] = ObsAr(np.zeros((self.X.shape[0], self.Z.shape[0])))
full_values['dL_dpsi2'] = ObsAr(np.zeros((self.Z.shape[0], self.Z.shape[0])))
full_values['Lpsi0'] = ObsAr(np.zeros(self.X.shape[0]))
full_values['Lpsi1'] = ObsAr(np.zeros((self.X.shape[0], self.Z.shape[0])))
full_values['Lpsi2'] = ObsAr(np.zeros((self.X.shape[0], self.Z.shape[0], self.Z.shape[0])))
return full_values
def parameters_changed(self):

View file

@@ -147,7 +147,11 @@ class SparseGPMiniBatch(SparseGP):
if np.isscalar(current_values[key]):
full_values[key] += current_values[key]
else:
full_values[key][index] += current_values[key]
from ..core.parameterization.observable_array import ObsAr
if isinstance(full_values[key], ObsAr):
full_values[key].values[index] += current_values[key]
else:
full_values[key][index] += current_values[key]
else:
full_values[key] = current_values[key]
@@ -178,11 +182,6 @@ class SparseGPMiniBatch(SparseGP):
dL_dpsi2=full_values['dL_dpsi2'],
psi0=self.psi0, psi1=self.psi1, psi2=self.psi2,
Lpsi0=full_values['Lpsi0'], Lpsi1=full_values['Lpsi1'], Lpsi2=full_values['Lpsi2'])
#self.kern.update_gradients_expectations(variational_posterior=self.X,
#Z=self.Z,
#dL_dpsi0=full_values['dL_dpsi0'],
#dL_dpsi1=full_values['dL_dpsi1'],
#dL_dpsi2=full_values['dL_dpsi2'])
full_values['kerngrad'] += self.kern.gradient
#gradients wrt Z
@@ -251,9 +250,10 @@ class SparseGPMiniBatch(SparseGP):
#Compute the psi statistics for N once, but don't sum out N in psi2
if self.has_uncertain_inputs():
self.kern.return_psi2_n = True
psi0 = self.kern.psi0(self.Z, self.X)
psi1 = self.kern.psi1(self.Z, self.X)
psi2 = self.kern.psi2(self.Z, self.X)
from ..core.parameterization.observable_array import ObsAr
psi0 = ObsAr(self.kern.psi0(self.Z, self.X))
psi1 = ObsAr(self.kern.psi1(self.Z, self.X))
psi2 = ObsAr(self.kern.psi2(self.Z, self.X))
else:
psi0 = self.kern.Kdiag(self.X)
psi1 = self.kern.K(self.X, self.Z)