From 829e40b25c6735ad3673eb1c29da265b5ea058b0 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Thu, 9 Oct 2014 10:34:01 +0100
Subject: [PATCH] [missing_data in sparse gp] can be extended towards
 missing_data handling in gp itself. Setting up gpy issue

---
 GPy/core/parameterization/param.py          |  2 ++
 GPy/core/parameterization/parameter_core.py |  4 ++--
 GPy/core/sparse_gp.py                       |  6 +++---
 GPy/examples/dimensionality_reduction.py    |  3 ---
 GPy/models/bayesian_gplvm.py                | 11 ++++++++---
 5 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py
index cb9b0cfe..c0836355 100644
--- a/GPy/core/parameterization/param.py
+++ b/GPy/core/parameterization/param.py
@@ -53,7 +53,9 @@ class Param(Parameterizable, ObsAr):
         return obj
 
     def __init__(self, name, input_array, default_constraint=None, *a, **kw):
+        self._in_init_ = True
         super(Param, self).__init__(name=name, default_constraint=default_constraint, *a, **kw)
+        self._in_init_ = False
 
     def build_pydot(self,G):
         import pydot
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index 8d1efb1c..67d74626 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -18,7 +18,7 @@ import numpy as np
 import re
 import logging
 
-__updated__ = '2014-09-22'
+__updated__ = '2014-10-09'
 
 class HierarchyError(Exception):
     """
@@ -99,7 +99,7 @@ class Observable(object):
         :param bool trigger_parent: Whether to trigger the parent, after self has updated
         """
-        if not self.update_model():
+        if not self.update_model() or self._in_init_:
             #print "Warning: updates are off, updating the model will do nothing"
             return
         self._trigger_params_changed(trigger_parent)
 
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index ecc1e4ba..8ea0d4c6 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -218,6 +218,7 @@ class SparseGP(GP):
             print message,
             for d in xrange(self.output_dim):
                 ninan = self.ninan[:, d]
+                print ' '*(len(message)) + '\r',
                 message = m_f(d)
                 print message,
 
@@ -249,9 +250,8 @@
         if self.missing_data:
             self._outer_loop_for_missing_data()
         else:
-            self.posterior, self._log_marginal_likelihood, self.grad_dict, gradients, _ = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
-            self.kern.gradient = gradients['kerngrad']
-            self.Z.gradient = gradients['Zgrad']
+            self.posterior, self._log_marginal_likelihood, self.grad_dict, full_values, _ = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
+            self._outer_values_update(full_values)
 
     def _raw_predict(self, Xnew, full_cov=False, kern=None):
         """
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index bf99dd66..94e83ff1 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -374,9 +374,6 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1,
     m = BayesianGPLVM(Ymissing, Q, init="random",
                       num_inducing=num_inducing, kernel=k,
                       missing_data=True)
-    m.X.variance[:] = _np.random.uniform(0,.1,m.X.shape)
-    m.likelihood.variance = .01
-    m.parameters_changed()
     m.Yreal = Y
 
     if optimize:
diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py
index 54c11fea..67cb6e62 100644
--- a/GPy/models/bayesian_gplvm.py
+++ b/GPy/models/bayesian_gplvm.py
@@ -32,7 +32,7 @@ class BayesianGPLVM(SparseGP_MPI):
 
         self.__IN_OPTIMIZATION__ = False
         self.logger = logging.getLogger(self.__class__.__name__)
-        if X == None:
+        if X is None:
             from ..util.initialization import initialize_latent
             self.logger.info("initializing latent space X with method {}".format(init))
             X, fracs = initialize_latent(init, input_dim, Y)
@@ -97,14 +97,19 @@ class BayesianGPLVM(SparseGP_MPI):
                                Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
                                dL_dpsi1=grad_dict['dL_dpsi1'],
                                dL_dpsi2=grad_dict['dL_dpsi2'])
+
+        # Subsetting Variational Posterior objects, makes the gradients
+        # empty. We need them to be 0 though:
         X.mean.gradient[:] = 0
         X.variance.gradient[:] = 0
+
         self.variational_prior.update_gradients_KL(X)
 
         current_values['meangrad'] += X.mean.gradient
         current_values['vargrad'] += X.variance.gradient
-        value_indices['meangrad'] = subset_indices['samples']
-        value_indices['vargrad'] = subset_indices['samples']
+        if subset_indices is not None:
+            value_indices['meangrad'] = subset_indices['samples']
+            value_indices['vargrad'] = subset_indices['samples']
         return posterior, log_marginal_likelihood, grad_dict, current_values, value_indices
 
     def _outer_values_update(self, full_values):
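
Note (not part of the patch): the net effect of this change is that SparseGP now handles missing outputs itself, so the example no longer needs to reset variances and call parameters_changed() by hand, and the _in_init_ flag set in Param.__init__ simply suppresses trigger_update while a parameter is still being constructed. Below is a minimal sketch of exercising the reworked missing-data path, following the constructor call shown in GPy/examples/dimensionality_reduction.py above; the data shapes, kernel choice, and 10% missingness rate are illustrative assumptions, not values from the patch.

    import numpy as np
    import GPy

    N, D, Q, num_inducing = 100, 30, 4, 10
    Y = np.random.randn(N, D)                      # stand-in for simulated outputs
    Ymissing = Y.copy()
    Ymissing[np.random.rand(N, D) < .1] = np.nan   # NaN marks missing entries

    k = GPy.kern.RBF(Q, ARD=True)                  # a kernel with psi-statistics
    m = GPy.models.BayesianGPLVM(Ymissing, Q, init="random",
                                 num_inducing=num_inducing, kernel=k,
                                 missing_data=True)
    m.optimize(messages=True)

With missing_data=True, parameters_changed() dispatches to _outer_loop_for_missing_data(), which iterates over output dimensions and pushes the accumulated per-dimension gradients through _outer_values_update(full_values), the same hook the non-missing branch now calls.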