diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 56400d91..7f2d688d 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -112,6 +112,7 @@ class SparseGP(GP):
         #gradients wrt Z
         self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z)
         self.Z.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X)
+        self._Zgrad = self.Z.gradient.copy()
 
     def _raw_predict(self, Xnew, full_cov=False, kern=None):
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 745c2c24..9d6686f9 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -405,10 +405,8 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
     D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
     _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, plot_sim)
-
-    # Ylist = [Ylist[0]]
-    k = kern.Linear(Q, ARD=True)
+    k = kern.Linear(Q) + kern.White(Q, variance=1e-4)
 
     m = MRD(Ylist, input_dim=Q, num_inducing=num_inducing, kernel=k, initx="PCA_concat", initz='permute', **kw)
     m['.*noise'] = [Y.var() / 40. for Y in Ylist]
@@ -428,8 +426,7 @@ def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim
     D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
     _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
-    # Ylist = [Ylist[0]]
-    k = kern.Linear(Q, ARD=True)
+    k = kern.Linear(Q) + kern.White(Q, variance=1e-4)
 
     inanlist = []
     for Y in Ylist:
diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py
index 3ac703fe..88123227 100644
--- a/GPy/models/bayesian_gplvm.py
+++ b/GPy/models/bayesian_gplvm.py
@@ -97,7 +97,7 @@ class BayesianGPLVM(SparseGP_MPI):
                                             dL_dpsi2=self.grad_dict['dL_dpsi2'])
 
         self.variational_prior.update_gradients_KL(self.X)
-
+        self._Xgrad = self.X.gradient.copy()
         #super(BayesianGPLVM, self).parameters_changed()
         #self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
 
diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py
index cb98e1a8..8f098a1b 100644
--- a/GPy/models/mrd.py
+++ b/GPy/models/mrd.py
@@ -15,6 +15,7 @@ from ..util.initialization import initialize_latent
 from ..core.sparse_gp import SparseGP, GP
 from GPy.core.parameterization.variational import VariationalPosterior
 from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
+from GPy.models.bayesian_gplvm import BayesianGPLVM
 from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
 
 class MRD(BayesianGPLVMMiniBatch):
@@ -170,7 +171,8 @@ class MRD(BayesianGPLVMMiniBatch):
             self._log_marginal_likelihood += b._log_marginal_likelihood
 
             self.logger.info('working on im <{}>'.format(hex(id(i))))
-            self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
+            self.Z.gradient[:] += b._Zgrad # b.Z.gradient # full_values['Zgrad']
+
             #grad_dict = b.full_values
 
             if self.has_uncertain_inputs():
diff --git a/GPy/models/sparse_gp_minibatch.py b/GPy/models/sparse_gp_minibatch.py
index be068a0e..54160e6f 100644
--- a/GPy/models/sparse_gp_minibatch.py
+++ b/GPy/models/sparse_gp_minibatch.py
@@ -321,3 +321,4 @@ class SparseGPMiniBatch(SparseGP):
         else:
             self.posterior, self._log_marginal_likelihood, self.grad_dict = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
         self._outer_values_update(self.grad_dict)
+        self._Zgrad = self.Z.gradient.copy()
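
Note for reviewers: the `_Zgrad` / `_Xgrad` copies cache each view's inducing-input and latent-input gradients immediately after they are computed, so `MRD.parameters_changed` can sum `b._Zgrad` across views instead of reading `b.Z.gradient`, which the minibatch bookkeeping may have overwritten by then. Below is a minimal smoke-test sketch of the changed path, assuming GPy with this patch applied; the random arrays stand in for `_simulate_sincos`, so the data and hyperparameter choices here are illustrative only, not part of the patch.

```python
# Hypothetical smoke test for the patched MRD path; assumes GPy (with this
# change applied) and numpy are importable.
import numpy as np
import GPy

np.random.seed(0)

# Same kernel change as in mrd_simulation: Linear plus a small White (jitter)
# term instead of Linear(Q, ARD=True).
Q = 5
k = GPy.kern.Linear(Q) + GPy.kern.White(Q, variance=1e-4)

# Random stand-in for the three simulated views sharing one latent space.
N = 60
Ylist = [np.random.randn(N, D) for D in (60, 20, 36)]

m = GPy.models.MRD(Ylist, input_dim=Q, num_inducing=6, kernel=k,
                   initx="PCA_concat", initz='permute')
m['.*noise'] = [Y.var() / 40. for Y in Ylist]
m.optimize(messages=False, max_iters=10)

# Each per-view model caches its own inducing-input gradient (_Zgrad), which
# MRD.parameters_changed sums into the shared Z.gradient.
print(m.Z.gradient.shape)
```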