mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-10 12:32:40 +02:00
[vardtc] sparse gplvm in bayesian gplvm minibatch
parent 865d8e3851
commit 4fc006f45d
2 changed files with 57 additions and 66 deletions
@@ -8,6 +8,7 @@ from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
import logging
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
from GPy.core.parameterization.param import Param

class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
    """
@@ -35,15 +36,20 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
        self.init = init

        if X_variance is None:
            self.logger.info("initializing latent space variance ~ uniform(0,.1)")
            X_variance = np.random.uniform(0,.1,X.shape)

        if Z is None:
            self.logger.info("initializing inducing inputs")
            Z = np.random.permutation(X.copy())[:num_inducing]
        assert Z.shape[1] == X.shape[1]

        if X_variance is False:
            self.logger.info('no variance on X, activating sparse GPLVM')
            X = Param("latent space", X)
        elif X_variance is None:
            self.logger.info("initializing latent space variance ~ uniform(0,.1)")
            X_variance = np.random.uniform(0,.1,X.shape)
            self.variational_prior = NormalPrior()
            X = NormalPosterior(X, X_variance)

        if kernel is None:
            self.logger.info("initializing kernel RBF")
            kernel = kern.RBF(input_dim, lengthscale=1./fracs, ARD=True)  # + kern.Bias(input_dim) + kern.White(input_dim)
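For orientation, the branch above is what gives this commit its name: X_variance=False keeps the latent X a plain Param, so the model behaves as a sparse GPLVM, while X_variance=None builds a NormalPosterior for the Bayesian GPLVM. A minimal standalone sketch of the same initialization logic, with shapes invented for illustration:

import numpy as np

X = np.random.randn(100, 5)   # latent means (illustrative shape)
num_inducing = 10

# q(X) variances drawn uniformly from (0, .1), as in the branch above
X_variance = np.random.uniform(0, .1, X.shape)

# inducing inputs: a random subset of the latent points
Z = np.random.permutation(X.copy())[:num_inducing]
assert Z.shape[1] == X.shape[1]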
@@ -51,9 +57,6 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
        if likelihood is None:
            likelihood = Gaussian()

        self.variational_prior = NormalPrior()
        X = NormalPosterior(X, X_variance)

        self.kl_factr = 1.

        if inference_method is None:
@@ -83,20 +86,26 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None):
        posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices)

        if self.has_uncertain_inputs():
            current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
                variational_posterior=X,
                Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
                dL_dpsi1=grad_dict['dL_dpsi1'],
                dL_dpsi2=grad_dict['dL_dpsi2'])
        else:
            current_values['Xgrad'] = self.kern.gradients_X(grad_dict['dL_dKnm'], X, Z)
            current_values['Xgrad'] += self.kern.gradients_X_diag(grad_dict['dL_dKdiag'], X)
            if subset_indices is not None:
                value_indices['Xgrad'] = subset_indices['samples']

        kl_fctr = self.kl_factr
        if self.has_uncertain_inputs():
            if self.missing_data:
                d = self.output_dim
                log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
            else:
                log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)

            # Subsetting VariationalPosterior objects makes the gradients
            # empty; we need them to be 0, though:
            X.mean.gradient[:] = 0
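The KL_divergence(X) subtracted above penalizes the variational posterior's departure from the standard-normal prior (divided by the output dimension d in the missing-data case, where each dimension contributes its own likelihood term). A minimal sketch of the closed form for a factorized Gaussian, assuming NormalPrior implements the textbook result; the helper name is illustrative, not GPy's API:

import numpy as np

def kl_to_standard_normal(mean, variance):
    # KL( N(mean, diag(variance)) || N(0, I) ), summed over
    # all latent points and dimensions
    return 0.5 * np.sum(variance + mean**2 - np.log(variance) - 1.0)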
@@ -121,42 +130,24 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
        E.g. set the gradients of parameters, etc.
        """
        super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
        if self.has_uncertain_inputs():
            self.X.mean.gradient = full_values['meangrad']
            self.X.variance.gradient = full_values['vargrad']
        else:
            self.X.gradient = full_values['Xgrad']

    def _outer_init_full_values(self):
        if self.has_uncertain_inputs():
            return dict(meangrad=np.zeros(self.X.mean.shape),
                        vargrad=np.zeros(self.X.variance.shape))
        else:
            return dict(Xgrad=np.zeros(self.X.shape))

    def parameters_changed(self):
        super(BayesianGPLVMMiniBatch,self).parameters_changed()
        if isinstance(self.inference_method, VarDTC_minibatch):
            return

        #super(BayesianGPLVM, self).parameters_changed()
        #self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)

        #self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])

        # This is testing code -------------------------
        # i = np.random.randint(self.X.shape[0])
        # X_ = self.X.mean
        # which = np.sqrt(((X_ - X_[i:i+1])**2).sum(1)).argsort()>(max(0, self.X.shape[0]-51))
        # _, _, grad_dict = self.inference_method.inference(self.kern, self.X[which], self.Z, self.likelihood, self.Y[which], self.Y_metadata)
        # grad = self.kern.gradients_qX_expectations(variational_posterior=self.X[which], Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
        #
        # self.X.mean.gradient[:] = 0
        # self.X.variance.gradient[:] = 0
        # self.X.mean.gradient[which] = grad[0]
        # self.X.variance.gradient[which] = grad[1]

        # update for the KL divergence
        # self.variational_prior.update_gradients_KL(self.X, which)
        # -----------------------------------------------

        # update for the KL divergence
        #self.variational_prior.update_gradients_KL(self.X)

    def plot_latent(self, labels=None, which_indices=None,
                    resolution=50, ax=None, marker='o', s=40,
                    fignum=None, plot_inducing=True, legend=True,
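Taken together, the two gradient branches let the same minibatch machinery drive either a Bayesian GPLVM (uncertain inputs, KL-regularized) or a sparse GPLVM (point estimates for X). A hedged usage sketch; the constructor arguments are inferred from this diff and may differ from the released signature:

import numpy as np
import GPy

Y = np.random.randn(100, 12)  # observed data: 100 points, 12 output dims

# Bayesian GPLVM: X_variance=None takes the NormalPosterior branch
m_bayes = GPy.models.BayesianGPLVMMiniBatch(Y, 3, num_inducing=10)

# sparse GPLVM (this commit): X_variance=False keeps X a plain Param
m_sparse = GPy.models.BayesianGPLVMMiniBatch(Y, 3, num_inducing=10,
                                             X_variance=False)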
@@ -111,9 +111,6 @@ class MRD(BayesianGPLVMMiniBatch):
        assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!"
        kernels = kernel

        if X_variance is None:
            X_variance = np.random.uniform(0.1, 0.2, X.shape)

        self.variational_prior = NormalPrior()
        #self.X = NormalPosterior(X, X_variance)
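MRD shares one latent space across several output views, with one kernel per view, which is what the assert above validates. A small sketch of assembling such a list (view count and input_dim invented for illustration; only the GPy.kern.RBF constructor is taken as given):

import GPy

input_dim = 3
# one ARD RBF per view, so each view can switch off latent dimensions
kernels = [GPy.kern.RBF(input_dim, ARD=True) for _ in range(2)]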
@@ -174,10 +171,13 @@ class MRD(BayesianGPLVMMiniBatch):
            self.Z.gradient[:] += b.full_values['Zgrad']
            grad_dict = b.full_values

            if self.has_uncertain_inputs():
                self.X.mean.gradient += grad_dict['meangrad']
                self.X.variance.gradient += grad_dict['vargrad']
            else:
                self.X.gradient += grad_dict['Xgrad']

        if isinstance(self.X, VariationalPosterior):
            if self.has_uncertain_inputs():
                # update for the KL divergence
                self.variational_prior.update_gradients_KL(self.X)
                self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
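update_gradients_KL adds the KL term's contribution to the gradients of the variational mean and variance. Assuming the standard-normal prior, the derivatives have a simple closed form; a sketch with signs matching the subtraction of the KL from the log marginal above (the helper and in-place arrays are illustrative, not GPy's API):

import numpy as np

def add_kl_gradients(mean, variance, mean_grad, variance_grad):
    # For KL( N(mean, diag(variance)) || N(0, I) ):
    #   d KL / d mean = mean ;  d KL / d var = (1 - 1/var) / 2
    # The objective subtracts the KL, so the gradients subtract these.
    mean_grad -= mean
    variance_grad -= 0.5 * (1.0 - 1.0 / variance)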