diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py
index a7b26a80..5fe63052 100644
--- a/GPy/core/parameterization/variational.py
+++ b/GPy/core/parameterization/variational.py
@@ -29,3 +29,29 @@ class Normal(Parameterized):
         assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
         from ...plotting.matplot_dep import variational_plots
         return variational_plots.plot(self,*args)
+
+
+class SpikeAndSlab(Parameterized):
+    '''
+    The SpikeAndSlab distribution for variational approximations.
+    '''
+    def __init__(self, means, variances, binary_prob, name='latent space'):
+        """
+        binary_prob : the probability of the distribution on the slab part.
+        """
+        Parameterized.__init__(self, name=name)
+        self.mean = Param("mean", means)
+        self.variance = Param('variance', variances, Logexp())
+        self.gamma = Param("binary_prob", binary_prob)
+        self.add_parameters(self.mean, self.variance, self.gamma)
+
+    def plot(self, *args):
+        """
+        Plot latent space X in 1D:
+
+        See GPy.plotting.matplot_dep.variational_plots
+        """
+        import sys
+        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
+        from ...plotting.matplot_dep import variational_plots
+        return variational_plots.plot(self, *args)
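A minimal usage sketch of the new container (the shapes and values are illustrative assumptions; the import path follows the file touched above, and Param/Logexp handling mirrors the Normal class):

import numpy as np
from GPy.core.parameterization.variational import SpikeAndSlab

# hypothetical sizes: 10 data points in a 2-dimensional latent space
means = np.random.randn(10, 2)       # variational means of the slab component
variances = 0.5 * np.ones((10, 2))   # variational variances, kept positive by Logexp
gamma = 0.5 * np.ones((10, 2))       # probability that each dimension is "on" (slab)

q = SpikeAndSlab(means, variances, gamma)
print(q.mean.shape, q.variance.shape, q.gamma.shape)
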
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 61a664fe..71053867 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -57,11 +57,14 @@ class SparseGP(GP):
         return not (self.X_variance is None)
 
     def parameters_changed(self):
-        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.X_variance, self.Z, self.likelihood, self.Y)
+        if self.has_uncertain_inputs():
+            self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference_latent(self.kern, self.q, self.Z, self.likelihood, self.Y)
+        else:
+            self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.X_variance, self.Z, self.likelihood, self.Y)
         self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood'))
         if self.has_uncertain_inputs():
-            self.kern.update_gradients_variational(mu=self.X, S=self.X_variance, Z=self.Z, **self.grad_dict)
-            self.Z.gradient = self.kern.gradients_Z_variational(mu=self.X, S=self.X_variance, Z=self.Z, **self.grad_dict)
+            self.kern.update_gradients_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
+            self.Z.gradient = self.kern.gradients_Z_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
         else:
             self.kern.update_gradients_sparse(X=self.X, Z=self.Z, **self.grad_dict)
             self.Z.gradient = self.kern.gradients_Z_sparse(X=self.X, Z=self.Z, **self.grad_dict)
diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index a81bb711..c2f179ac 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -43,9 +43,20 @@ class VarDTC(object):
         return Y * prec # TODO cache this, and make it effective
 
     def inference(self, kern, X, X_variance, Z, likelihood, Y):
+        """Inference for the standard sparse GP"""
+        uncertain_inputs = False
+        psi0, psi1, psi2 = _compute_psi(kern, X, X_variance, Z)
+        return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs)
+
+    def inference_latent(self, kern, posterior_variational, Z, likelihood, Y):
+        """Inference for GPLVM with uncertain inputs"""
+        uncertain_inputs = True
+        psi0, psi1, psi2 = _compute_psi_latent(kern, posterior_variational, Z)
+        return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs)
+
+    def _inference(self, kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs):
         #see whether we're using variational uncertain inputs
-        uncertain_inputs = not (X_variance is None)
 
         _, output_dim = Y.shape
@@ -62,10 +73,9 @@ class VarDTC(object):
         # do the inference:
         het_noise = beta.size < 1
         num_inducing = Z.shape[0]
-        num_data = X.shape[0]
+        num_data = Y.shape[0]
 
         # kernel computations, using BGPLVM notation
-        Kmm = kern.K(Z)
-        psi0, psi1, psi2 = _compute_psi(kern, X, X_variance, Z, uncertain_inputs)
+        Kmm = kern.K(Z)
 
         Lm = jitchol(Kmm)
@@ -191,20 +201,31 @@ class VarDTCMissingData(object):
         else:
             self._subarray_indices = [[slice(None),slice(None)]]
         return [Y], [(Y**2).sum()]
-    
+
     def inference(self, kern, X, X_variance, Z, likelihood, Y):
+        """Inference for the standard sparse GP"""
+        uncertain_inputs = False
+        psi0, psi1, psi2 = _compute_psi(kern, X, X_variance, Z)
+        return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs)
+
+    def inference_latent(self, kern, posterior_variational, Z, likelihood, Y):
+        """Inference for GPLVM with uncertain inputs"""
+        uncertain_inputs = True
+        psi0, psi1, psi2 = _compute_psi_latent(kern, posterior_variational, Z)
+        return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs)
+
+    def _inference(self, kern, psi0_all, psi1_all, psi2_all, Z, likelihood, Y, uncertain_inputs):
         Ys, traces = self._Y(Y)
         beta_all = 1./likelihood.variance
-        uncertain_inputs = not (X_variance is None)
         het_noise = beta_all.size != 1
         import itertools
         num_inducing = Z.shape[0]
-        dL_dpsi0_all = np.zeros(X.shape[0])
-        dL_dpsi1_all = np.zeros((X.shape[0], num_inducing))
+        dL_dpsi0_all = np.zeros(Y.shape[0])
+        dL_dpsi1_all = np.zeros((Y.shape[0], num_inducing))
         if uncertain_inputs:
-            dL_dpsi2_all = np.zeros((X.shape[0], num_inducing, num_inducing))
+            dL_dpsi2_all = np.zeros((Y.shape[0], num_inducing, num_inducing))
         partial_for_likelihood = 0
         woodbury_vector = np.zeros((num_inducing, Y.shape[1]))
@@ -217,9 +238,6 @@ class VarDTCMissingData(object):
             Lm = jitchol(Kmm)
             if uncertain_inputs:
                 LmInv = dtrtri(Lm)
-            # kernel computations, using BGPLVM notation
-            psi0_all, psi1_all, psi2_all = _compute_psi(kern, X, X_variance, Z, uncertain_inputs)
-
             VVT_factor_all = np.empty(Y.shape)
             full_VVT_factor = VVT_factor_all.shape[1] == Y.shape[1]
             if not full_VVT_factor:
@@ -340,15 +358,16 @@ class VarDTCMissingData(object):
         return post, log_marginal, grad_dict
 
-def _compute_psi(kern, X, X_variance, Z, uncertain_inputs):
-    if uncertain_inputs:
-        psi0 = kern.psi0(Z, X, X_variance)
-        psi1 = kern.psi1(Z, X, X_variance)
-        psi2 = kern.psi2(Z, X, X_variance)
-    else:
-        psi0 = kern.Kdiag(X)
-        psi1 = kern.K(X, Z)
-        psi2 = None
+def _compute_psi(kern, X, X_variance, Z):
+    psi0 = kern.Kdiag(X)
+    psi1 = kern.K(X, Z)
+    psi2 = None
+    return psi0, psi1, psi2
+
+def _compute_psi_latent(kern, posterior_variational, Z):
+    psi0 = kern.psi0(Z, posterior_variational)
+    psi1 = kern.psi1(Z, posterior_variational)
+    psi2 = kern.psi2(Z, posterior_variational)
     return psi0, psi1, psi2
 
 def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs):
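The refactor splits psi-statistic computation into a certain-inputs path and a latent path, with both public entry points reducing to the shared _inference routine. Schematically (a sketch, with kern, X, Z and the posterior q standing in for live objects):

# certain inputs: psi statistics are plain kernel evaluations, psi2 is unused
psi0 = kern.Kdiag(X)    # matches _compute_psi(kern, X, X_variance, Z)[0]
psi1 = kern.K(X, Z)     # matches _compute_psi(kern, X, X_variance, Z)[1]

# uncertain inputs: psi statistics are expectations of the kernel under q(X)
psi0 = kern.psi0(Z, q)  # E_q[Kdiag(X)]
psi1 = kern.psi1(Z, q)  # E_q[K(X, Z)]
psi2 = kern.psi2(Z, q)  # E_q[K(Z, X) K(X, Z)]
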
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py
index b3ee57cd..5fe29d51 100644
--- a/GPy/kern/_src/kern.py
+++ b/GPy/kern/_src/kern.py
@@ -26,11 +26,11 @@ class Kern(Parameterized):
         raise NotImplementedError
     def Kdiag(self, Xa):
         raise NotImplementedError
-    def psi0(self,Z,mu,S):
+    def psi0(self,Z,posterior_variational):
         raise NotImplementedError
-    def psi1(self,Z,mu,S):
+    def psi1(self,Z,posterior_variational):
         raise NotImplementedError
-    def psi2(self,Z,mu,S):
+    def psi2(self,Z,posterior_variational):
         raise NotImplementedError
     def gradients_X(self, dL_dK, X, X2):
         raise NotImplementedError
@@ -49,16 +49,16 @@ class Kern(Parameterized):
         self._collect_gradient(target)
         self._set_gradient(target)
 
-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
         """Set the gradients of all parameters when doing variational (M) inference with uncertain inputs."""
         raise NotImplementedError
 
     def gradients_Z_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
         grad = self.gradients_X(dL_dKmm, Z)
         grad += self.gradients_X(dL_dKnm.T, Z, X)
         return grad
 
-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
         raise NotImplementedError
-    def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
         raise NotImplementedError
 
     def plot_ARD(self, *args):
diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py
index c4d595d0..0c8588a2 100644
--- a/GPy/kern/_src/rbf.py
+++ b/GPy/kern/_src/rbf.py
@@ -79,16 +79,21 @@ class RBF(Kern):
         ret[:] = self.variance
         return ret
 
-    def psi0(self, Z, mu, S):
+    def psi0(self, Z, posterior_variational):
+        mu = posterior_variational.mean
         ret = np.empty(mu.shape[0], dtype=np.float64)
         ret[:] = self.variance
         return ret
 
-    def psi1(self, Z, mu, S):
+    def psi1(self, Z, posterior_variational):
+        mu = posterior_variational.mean
+        S = posterior_variational.variance
         self._psi_computations(Z, mu, S)
         return self._psi1
 
-    def psi2(self, Z, mu, S):
+    def psi2(self, Z, posterior_variational):
+        mu = posterior_variational.mean
+        S = posterior_variational.variance
         self._psi_computations(Z, mu, S)
         return self._psi2
@@ -121,7 +126,9 @@ class RBF(Kern):
         else:
             self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm)
 
-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+        mu = posterior_variational.mean
+        S = posterior_variational.variance
         self._psi_computations(Z, mu, S)
 
         #contributions from psi0:
@@ -155,7 +162,9 @@ class RBF(Kern):
         else:
             self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm)
 
-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+        mu = posterior_variational.mean
+        S = posterior_variational.variance
         self._psi_computations(Z, mu, S)
 
         #psi1
@@ -173,7 +182,9 @@ class RBF(Kern):
 
         return grad
 
-    def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+        mu = posterior_variational.mean
+        S = posterior_variational.variance
         self._psi_computations(Z, mu, S)
         #psi1
         tmp = self._psi1[:, :, None] / self.lengthscale2 / self._psi1_denom
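For context on what psi1 computes for the RBF kernel: under a Gaussian q(X) it has a well-known closed form (the Bayesian GPLVM psi statistics of Titsias and Lawrence). A self-contained numpy sketch of that formula, independent of GPy's cached _psi_computations:

import numpy as np

def rbf_psi1(variance, lengthscale2, Z, mu, S):
    """E_q[K(X, Z)] for an RBF kernel under q(X) = N(mu, diag(S)).

    mu, S: (N, Q) variational means/variances; Z: (M, Q) inducing inputs;
    lengthscale2: (Q,) squared lengthscales.
    """
    denom = S + lengthscale2                                 # (N, Q)
    dist_sq = (mu[:, None, :] - Z[None, :, :]) ** 2          # (N, M, Q)
    expo = -0.5 * dist_sq / denom[:, None, :]                # (N, M, Q)
    scale = np.prod(np.sqrt(lengthscale2 / denom), axis=1)   # (N,)
    return variance * scale[:, None] * np.exp(expo.sum(-1))  # (N, M)

mu, S = np.random.randn(10, 2), 0.1 * np.ones((10, 2))
Z = np.random.randn(5, 2)
print(rbf_psi1(1.0, np.ones(2), Z, mu, S).shape)  # (10, 5)
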
diff --git a/GPy/kern/_src/ss_rbf.py b/GPy/kern/_src/ss_rbf.py
deleted file mode 100644
index cab8fd11..00000000
--- a/GPy/kern/_src/ss_rbf.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-
-import numpy as np
-from kernpart import Kernpart
-from ...util.linalg import tdot
-from ...util.misc import fast_array_equal, param_to_array
-from ...core.parameterization import Param
-
-class SS_RBF(Kernpart):
-    """
-    The RBF kernel for Spike-and-Slab GPLVM
-    Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:
-
-    .. math::
-
-       k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) \ \ \ \ \ \\text{ where } r^2 = \sum_{i=1}^d \\frac{ (x_i-x^\prime_i)^2}{\ell_i^2}
-
-    where \ell_i is the lengthscale, \sigma^2 the variance and d the dimensionality of the input.
-
-    :param input_dim: the number of input dimensions
-    :type input_dim: int
-    :param variance: the variance of the kernel
-    :type variance: float
-    :param lengthscale: the vector of lengthscale of the kernel
-    :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
-    :rtype: kernel object
-    """
-
-    def __init__(self, input_dim, variance=1., lengthscale=None, name='rbf'):
-        super(RBF, self).__init__(input_dim, name)
-        self.input_dim = input_dim
-
-        if lengthscale is not None:
-            lengthscale = np.asarray(lengthscale)
-            assert lengthscale.size == self.input_dim, "bad number of lengthscales"
-        else:
-            lengthscale = np.ones(self.input_dim)
-
-        self.variance = Param('variance', variance)
-        self.lengthscale = Param('lengthscale', lengthscale)
-        self.lengthscale.add_observer(self, self.update_lengthscale)
-        self.add_parameters(self.variance, self.lengthscale)
-        self.parameters_changed() # initializes cache
-
-    def on_input_change(self, X):
-        #self._K_computations(X, None)
-        pass
-
-    def update_lengthscale(self, l):
-        self.lengthscale2 = np.square(self.lengthscale)
-
-    def parameters_changed(self):
-        # reset cached results
-        self._X, self._X2 = np.empty(shape=(2, 1))
-        self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S
-
-    def K(self, X, X2, target):
-        self._K_computations(X, X2)
-        target += self.variance * self._K_dvar
-
-    def Kdiag(self, X, target):
-        np.add(target, self.variance, target)
-
-    def psi0(self, Z, mu, S, target):
-        target += self.variance
-
-    def psi1(self, Z, mu, S, target):
-        self._psi_computations(Z, mu, S)
-        target += self._psi1
-
-    def psi2(self, Z, mu, S, target):
-        self._psi_computations(Z, mu, S)
-        target += self._psi2
-
-    def update_gradients_full(self, dL_dK, X):
-        self._K_computations(X, None)
-        self.variance.gradient = np.sum(self._K_dvar * dL_dK)
-        if self.ARD:
-            self.lengthscale.gradient = self._dL_dlengthscales_via_K(dL_dK, X, None)
-        else:
-            self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK)
-
-    def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
-        #contributions from Kdiag
-        self.variance.gradient = np.sum(dL_dKdiag)
-
-        #from Knm
-        self._K_computations(X, Z)
-        self.variance.gradient += np.sum(dL_dKnm * self._K_dvar)
-        if self.ARD:
-            self.lengthscales.gradient = self._dL_dlengthscales_via_K(dL_dKnm, X, Z)
-
-        else:
-            self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm)
-
-        #from Kmm
-        self._K_computations(Z, None)
-        self.variance.gradient += np.sum(dL_dKmm * self._K_dvar)
-        if self.ARD:
-            self.lengthscales.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None)
-        else:
-            self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm)
-
-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        self._psi_computations(Z, mu, S)
-
-        #contributions from psi0:
-        self.variance.gradient = np.sum(dL_dpsi0)
-
-        #from psi1
-        self.variance.gradient += np.sum(dL_dpsi1 * self._psi1 / self.variance)
-        d_length = self._psi1[:,:,None] * ((self._psi1_dist_sq - 1.)/(self.lengthscale*self._psi1_denom) +1./self.lengthscale)
-        dpsi1_dlength = d_length * dL_dpsi1[:, :, None]
-        if not self.ARD:
-            self.lengthscale.gradeint = dpsi1_dlength.sum()
-        else:
-            self.lengthscale.gradient = dpsi1_dlength.sum(0).sum(0)
-
-        #from psi2
-        d_var = 2.*self._psi2 / self.variance
-        d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / self.lengthscale2) / (self.lengthscale * self._psi2_denom)
-
-        self.variance.gradient += np.sum(dL_dpsi2 * d_var)
-        dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None]
-        if not self.ARD:
-            self.lengthscale.gradient += dpsi2_dlength.sum()
-        else:
-            self.lengthscale.gradient += dpsi2_dlength.sum(0).sum(0).sum(0)
-
-        #from Kmm
-        self._K_computations(Z, None)
-        self.variance.gradient += np.sum(dL_dKmm * self._K_dvar)
-        if self.ARD:
-            self.lengthscales.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None)
-        else:
-            self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK)
-
-    def gradients_X(self, dL_dK, X, X2, target):
-        #if self._X is None or X.base is not self._X.base or X2 is not None:
-        self._K_computations(X, X2)
-        if X2 is None:
-            _K_dist = 2*(X[:, None, :] - X[None, :, :])
-        else:
-            _K_dist = X[:, None, :] - X2[None, :, :] # don't cache this in _K_computations because it is high memory. If this function is being called, chances are we're not in the high memory arena.
-        gradients_X = (-self.variance / self.lengthscale2) * np.transpose(self._K_dvar[:, :, np.newaxis] * _K_dist, (1, 0, 2))
-        target += np.sum(gradients_X * dL_dK.T[:, :, None], 0)
-
-    def dKdiag_dX(self, dL_dKdiag, X, target):
-        pass
-
-    #---------------------------------------#
-    #            PSI statistics             #
-    #---------------------------------------#
-
-    def dpsi0_dmuS(self, dL_dpsi0, Z, mu, S, target_mu, target_S):
-        pass
-
-    def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, target):
-        self._psi_computations(Z, mu, S)
-        denominator = (self.lengthscale2 * (self._psi1_denom))
-        dpsi1_dZ = -self._psi1[:, :, None] * ((self._psi1_dist / denominator))
-        target += np.sum(dL_dpsi1[:, :, None] * dpsi1_dZ, 0)
-
-    def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S, target_mu, target_S):
-        self._psi_computations(Z, mu, S)
-        tmp = self._psi1[:, :, None] / self.lengthscale2 / self._psi1_denom
-        target_mu += np.sum(dL_dpsi1[:, :, None] * tmp * self._psi1_dist, 1)
-        target_S += np.sum(dL_dpsi1[:, :, None] * 0.5 * tmp * (self._psi1_dist_sq - 1), 1)
-
-    def dpsi2_dZ(self, dL_dpsi2, Z, mu, S, target):
-        self._psi_computations(Z, mu, S)
-        term1 = self._psi2_Zdist / self.lengthscale2 # num_inducing, num_inducing, input_dim
-        term2 = self._psi2_mudist / self._psi2_denom / self.lengthscale2 # N, num_inducing, num_inducing, input_dim
-        dZ = self._psi2[:, :, :, None] * (term1[None] + term2)
-        target += (dL_dpsi2[:, :, :, None] * dZ).sum(0).sum(0)
-
-    def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, target_mu, target_S):
-        """Think N,num_inducing,num_inducing,input_dim """
-        self._psi_computations(Z, mu, S)
-        tmp = self._psi2[:, :, :, None] / self.lengthscale2 / self._psi2_denom
-        target_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1)
-        target_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1)
-
-    #---------------------------------------#
-    #            Precomputations            #
-    #---------------------------------------#
-
-    def _K_computations(self, X, X2):
-        #params = self._get_params()
-        if not (fast_array_equal(X, self._X) and fast_array_equal(X2, self._X2)):# and fast_array_equal(self._params_save , params)):
-            #self._X = X.copy()
-            #self._params_save = params.copy()
-            if X2 is None:
-                self._X2 = None
-                X = X / self.lengthscale
-                Xsquare = np.sum(np.square(X), 1)
-                self._K_dist2 = -2.*tdot(X) + (Xsquare[:, None] + Xsquare[None, :])
-            else:
-                self._X2 = X2.copy()
-                X = X / self.lengthscale
-                X2 = X2 / self.lengthscale
-                self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])
-            self._K_dvar = np.exp(-0.5 * self._K_dist2)
-
-    def _dL_dlengthscales_via_K(self, dL_dK, X, X2):
-        """
-        A helper function for update_gradients_* methods
-
-        Computes the derivative of the objective L wrt the lengthscales via
-
-        dL_dl = sum_{i,j}(dL_dK_{ij} dK_dl)
-
-        assumes self._K_computations has just been called.
-
-        This is only valid if self.ARD=True
-        """
-        target = np.zeros(self.input_dim)
-        dvardLdK = self._K_dvar * dL_dK
-        var_len3 = self.variance / np.power(self.lengthscale, 3)
-        if X2 is None:
-            # save computation for the symmetrical case
-            dvardLdK = dvardLdK + dvardLdK.T
-            code = """
-            int q,i,j;
-            double tmp;
-            for(q=0; q
-            #include
-            """
-            weave.inline(code, support_code=support_code, libraries=['gomp'],
-                         arg_names=['N', 'num_inducing', 'input_dim', 'mu', 'Zhat', 'mudist_sq', 'mudist', 'lengthscale2', '_psi2_denom', 'psi2_Zdist_sq', 'psi2_exponent', 'half_log_psi2_denom', 'psi2', 'variance_sq'],
-                         type_converters=weave.converters.blitz, **self.weave_options)
-
-        return mudist, mudist_sq, psi2_exponent, psi2
diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py
index 8aa378ce..cc68de68 100644
--- a/GPy/models/bayesian_gplvm.py
+++ b/GPy/models/bayesian_gplvm.py
@@ -66,7 +66,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         super(BayesianGPLVM, self).parameters_changed()
         self._log_marginal_likelihood -= self.KL_divergence()
 
-        dL_dmu, dL_dS = self.kern.gradients_muS_variational(mu=self.X, S=self.X_variance, Z=self.Z, **self.grad_dict)
+        dL_dmu, dL_dS = self.kern.gradients_q_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
 
         # dL:
         self.q.mean.gradient = dL_dmu
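The model-level effect of the rename is that gradients flow straight onto the parameters of self.q. A schematic continuation of the snippet above (the variance line is an assumption about how the truncated hunk continues; a SpikeAndSlab posterior would additionally need a gradient for q.gamma, which this patch does not yet wire up):

dL_dmu, dL_dS = self.kern.gradients_q_variational(
    posterior_variational=self.q, Z=self.Z, **self.grad_dict)
self.q.mean.gradient = dL_dmu      # from the diff
self.q.variance.gradient = dL_dS   # assumption: set analogously to the mean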