mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-09 03:52:39 +02:00
[spgp minibatch] linear calls the right psicomps and add kernel
This commit is contained in:
parent
bf3c579cc6
commit
8fa5b67089
5 changed files with 16 additions and 13 deletions
|
|
@ -119,9 +119,10 @@ class SparseGP(GP):
|
|||
if there is missing data, each dimension has its own full_cov of shape NxNxD, and if full_cov is off,
|
||||
we take only the diagonal elements across N.
|
||||
|
||||
For uncertain inputs, the SparseGP bound produces a full covariance structure across D, so for full_cov we
|
||||
return a NxDxD matrix and in the not full_cov case, we return the diagonal elements across D (NxD).
|
||||
This is for both with and without missing data. For the missing-data SparseGP implementation, see :py:class:`~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch`.
|
||||
For uncertain inputs, the SparseGP bound cannot predict the full covariance matrix full_cov for now.
|
||||
The implementation of that will follow. However, for each dimension the
|
||||
covariance changes, so if full_cov is False (standard), we return the variance
|
||||
for each dimension [NxD].
|
||||
"""
|
||||
|
||||
if kern is None: kern = self.kern
|
||||
|
|
@ -158,6 +159,7 @@ class SparseGP(GP):
|
|||
mu = np.dot(psi1_star, la) # TODO: dimensions?
|
||||
|
||||
if full_cov:
|
||||
raise NotImplementedError, "Full covariance for Sparse GP predicted with uncertain inputs not implemented yet."
|
||||
var = np.empty((Xnew.shape[0], la.shape[1], la.shape[1]))
|
||||
di = np.diag_indices(la.shape[1])
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -85,15 +85,15 @@ class Add(CombinationKernel):
|
|||
[target.__iadd__(p.gradients_XX_diag(dL_dKdiag, X)) for p in self.parts]
|
||||
return target
|
||||
|
||||
@Cache_this(limit=2, force_kwargs=['which_parts'])
|
||||
@Cache_this(limit=1, force_kwargs=['which_parts'])
|
||||
def psi0(self, Z, variational_posterior):
|
||||
return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts))
|
||||
|
||||
@Cache_this(limit=2, force_kwargs=['which_parts'])
|
||||
@Cache_this(limit=1, force_kwargs=['which_parts'])
|
||||
def psi1(self, Z, variational_posterior):
|
||||
return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts))
|
||||
|
||||
@Cache_this(limit=2, force_kwargs=['which_parts'])
|
||||
@Cache_this(limit=1, force_kwargs=['which_parts'])
|
||||
def psi2(self, Z, variational_posterior):
|
||||
psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
|
||||
#return psi2
|
||||
|
|
@ -128,7 +128,7 @@ class Add(CombinationKernel):
|
|||
raise NotImplementedError("psi2 cannot be computed for this kernel")
|
||||
return psi2
|
||||
|
||||
@Cache_this(limit=2, force_kwargs=['which_parts'])
|
||||
@Cache_this(limit=1, force_kwargs=['which_parts'])
|
||||
def psi2n(self, Z, variational_posterior):
|
||||
psi2 = reduce(np.add, (p.psi2n(Z, variational_posterior) for p in self.parts))
|
||||
#return psi2
|
||||
|
|
|
|||
|
|
@ -125,6 +125,9 @@ class Linear(Kern):
|
|||
def psi2(self, Z, variational_posterior):
|
||||
return self.psicomp.psicomputations(self.variances, Z, variational_posterior)[2]
|
||||
|
||||
def psi2n(self, Z, variational_posterior):
|
||||
return self.psicomp.psicomputations(self.variances, Z, variational_posterior, return_psi2_n=True)[2]
|
||||
|
||||
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
|
||||
dL_dvar = self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variances, Z, variational_posterior)[0]
|
||||
if self.ARD:
|
||||
|
|
|
|||
|
|
@ -35,9 +35,9 @@ class PSICOMP_RBF(Pickleable):
|
|||
class PSICOMP_Linear(Pickleable):
|
||||
|
||||
@Cache_this(limit=2, ignore_args=(0,))
|
||||
def psicomputations(self, variance, Z, variational_posterior):
|
||||
def psicomputations(self, variance, Z, variational_posterior, return_psi2_n=False):
|
||||
if isinstance(variational_posterior, variational.NormalPosterior):
|
||||
return linear_psi_comp.psicomputations(variance, Z, variational_posterior)
|
||||
return linear_psi_comp.psicomputations(variance, Z, variational_posterior, return_psi2_n=return_psi2_n)
|
||||
elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
|
||||
return sslinear_psi_comp.psicomputations(variance, Z, variational_posterior)
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -171,8 +171,7 @@ class SparseGPMiniBatch(SparseGP):
|
|||
variational_posterior=self.X,
|
||||
Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
|
||||
dL_dpsi1=full_values['dL_dpsi1'],
|
||||
dL_dpsi2=full_values['dL_dpsi2'],
|
||||
psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
|
||||
dL_dpsi2=full_values['dL_dpsi2'])
|
||||
self.kern.gradient += kgrad
|
||||
|
||||
|
||||
|
|
@ -182,8 +181,7 @@ class SparseGPMiniBatch(SparseGP):
|
|||
variational_posterior=self.X,
|
||||
Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
|
||||
dL_dpsi1=full_values['dL_dpsi1'],
|
||||
dL_dpsi2=full_values['dL_dpsi2'],
|
||||
psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
|
||||
dL_dpsi2=full_values['dL_dpsi2'])
|
||||
else:
|
||||
#gradients wrt kernel
|
||||
self.kern.update_gradients_diag(full_values['dL_dKdiag'], self.X)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue