From 1b49f7ab30808effca88bcd85f5d6aa6e038a7e8 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Fri, 31 Jan 2014 17:01:50 +0000
Subject: [PATCH 1/3] not calling self.parameters_changed explicitly anymore -> not needed

---
 GPy/core/gp.py        | 2 +-
 GPy/core/sparse_gp.py | 3 ---
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 060b617a..6d9ed75d 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -56,7 +56,7 @@ class GP(Model):
         self.add_parameter(self.kern)
         self.add_parameter(self.likelihood)
 
-        self.parameters_changed()
+        #self.parameters_changed()
 
     def parameters_changed(self):
         self.posterior, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y)

diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index ab1f3bf0..3a6a98cb 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -51,11 +51,8 @@ class SparseGP(GP):
         self.X_variance = X_variance
 
         GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name)
-
         self.Z = Param('inducing inputs', self.Z)
         self.add_parameter(self.Z, index=0)
-        self.add_parameter(self.kern)
-        self.add_parameter(self.likelihood)
 
     def parameters_changed(self):
         self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.X_variance, self.Z, self.likelihood, self.Y)
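Both constructors now rely on GPy's parameterization machinery to trigger parameters_changed() once parameters are registered, which is why the explicit call becomes redundant. A minimal sketch of that notification pattern follows; SimpleModel is an illustrative stand-in, not GPy's actual Parameterized/Model API:

    # sketch only: SimpleModel stands in for GPy's parameterization machinery
    class SimpleModel(object):
        def __init__(self):
            self._params = []

        def add_parameter(self, p):
            self._params.append(p)
            # registering a parameter notifies the model, so an explicit
            # parameters_changed() call at the end of __init__ is not needed
            self.parameters_changed()

        def parameters_changed(self):
            # subclasses recompute cached quantities (posterior, marginal
            # likelihood, ...) here
            pass
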
From 70e7d72bf26ea9ec3ebd1f5ed2dd4a27341259a1 Mon Sep 17 00:00:00 2001
From: Zhenwen Dai
Date: Mon, 3 Feb 2014 09:12:43 +0000
Subject: [PATCH 2/3] add spike-and-slab gplvm kernel [unfinished]

---
 GPy/kern/parts/ss_rbf.py | 352 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 352 insertions(+)
 create mode 100644 GPy/kern/parts/ss_rbf.py

diff --git a/GPy/kern/parts/ss_rbf.py b/GPy/kern/parts/ss_rbf.py
new file mode 100644
index 00000000..a234d428
--- /dev/null
+++ b/GPy/kern/parts/ss_rbf.py
@@ -0,0 +1,352 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+
+import numpy as np
+from kernpart import Kernpart
+from ...util.linalg import tdot
+from ...util.misc import fast_array_equal, param_to_array
+from ...core.parameterization import Param
+
+class SS_RBF(Kernpart):
+    """
+    The RBF kernel for the Spike-and-Slab GPLVM.
+    Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:
+
+    .. math::
+
+       k(r) = \sigma^2 \exp\\bigg(-\\frac{1}{2} r^2\\bigg) \quad \\text{where} \quad r^2 = \sum_{i=1}^d \\frac{(x_i - x^\prime_i)^2}{\ell_i^2}
+
+    where :math:`\ell_i` is the lengthscale, :math:`\sigma^2` the variance and :math:`d` the dimensionality of the input.
+
+    :param input_dim: the number of input dimensions
+    :type input_dim: int
+    :param variance: the variance of the kernel
+    :type variance: float
+    :param lengthscale: the vector of lengthscales of the kernel
+    :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
+    :rtype: kernel object
+    """
+
+    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='rbf'):
+        super(SS_RBF, self).__init__(input_dim, name)
+        self.input_dim = input_dim
+        self.ARD = ARD
+
+        if lengthscale is not None:
+            lengthscale = np.asarray(lengthscale)
+            assert lengthscale.size == self.input_dim, "bad number of lengthscales"
+        else:
+            lengthscale = np.ones(self.input_dim)
+
+        self.variance = Param('variance', variance)
+        self.lengthscale = Param('lengthscale', lengthscale)
+        self.lengthscale.add_observer(self, self.update_lengthscale)
+        self.add_parameters(self.variance, self.lengthscale)
+        self.parameters_changed()  # initializes cache
+
+    def on_input_change(self, X):
+        #self._K_computations(X, None)
+        pass
+
+    def update_lengthscale(self, l):
+        self.lengthscale2 = np.square(self.lengthscale)
+
+    def parameters_changed(self):
+        # reset cached results
+        self._X, self._X2 = np.empty(shape=(2, 1))
+        self._Z, self._mu, self._S = np.empty(shape=(3, 1))  # cached versions of Z, mu, S
+
+    def K(self, X, X2, target):
+        self._K_computations(X, X2)
+        target += self.variance * self._K_dvar
+
+    def Kdiag(self, X, target):
+        np.add(target, self.variance, target)
+
+    def psi0(self, Z, mu, S, target):
+        target += self.variance
+
+    def psi1(self, Z, mu, S, target):
+        self._psi_computations(Z, mu, S)
+        target += self._psi1
+
+    def psi2(self, Z, mu, S, target):
+        self._psi_computations(Z, mu, S)
+        target += self._psi2
+
+    def update_gradients_full(self, dL_dK, X):
+        self._K_computations(X, None)
+        self.variance.gradient = np.sum(self._K_dvar * dL_dK)
+        if self.ARD:
+            self.lengthscale.gradient = self._dL_dlengthscales_via_K(dL_dK, X, None)
+        else:
+            self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK)
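+
+    # Editor's illustrative comment (not part of the original patch): the
+    # non-ARD branch above is the chain rule
+    #     dL/dl = sum_ij dL/dK_ij * dK_ij/dl,
+    # where for the RBF dK_ij/dl = (variance / l) * exp(-r2_ij / 2) * r2_ij,
+    # using the cached _K_dist2 (= r2 = |x_i - x_j|^2 / l^2) and
+    # _K_dvar (= exp(-r2 / 2)).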
+
+    def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
+        #contributions from Kdiag
+        self.variance.gradient = np.sum(dL_dKdiag)
+
+        #from Knm
+        self._K_computations(X, Z)
+        self.variance.gradient += np.sum(dL_dKnm * self._K_dvar)
+        if self.ARD:
+            self.lengthscale.gradient = self._dL_dlengthscales_via_K(dL_dKnm, X, Z)
+        else:
+            self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKnm)
+
+        #from Kmm
+        self._K_computations(Z, None)
+        self.variance.gradient += np.sum(dL_dKmm * self._K_dvar)
+        if self.ARD:
+            self.lengthscale.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None)
+        else:
+            self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm)
+
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+        self._psi_computations(Z, mu, S)
+
+        #contributions from psi0:
+        self.variance.gradient = np.sum(dL_dpsi0)
+
+        #from psi1
+        self.variance.gradient += np.sum(dL_dpsi1 * self._psi1 / self.variance)
+        d_length = self._psi1[:, :, None] * ((self._psi1_dist_sq - 1.) / (self.lengthscale * self._psi1_denom) + 1. / self.lengthscale)
+        dpsi1_dlength = d_length * dL_dpsi1[:, :, None]
+        if not self.ARD:
+            self.lengthscale.gradient = dpsi1_dlength.sum()
+        else:
+            self.lengthscale.gradient = dpsi1_dlength.sum(0).sum(0)
+
+        #from psi2
+        d_var = 2.*self._psi2 / self.variance
+        d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / self.lengthscale2) / (self.lengthscale * self._psi2_denom)
+        self.variance.gradient += np.sum(dL_dpsi2 * d_var)
+        dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None]
+        if not self.ARD:
+            self.lengthscale.gradient += dpsi2_dlength.sum()
+        else:
+            self.lengthscale.gradient += dpsi2_dlength.sum(0).sum(0).sum(0)
+
+        #from Kmm
+        self._K_computations(Z, None)
+        self.variance.gradient += np.sum(dL_dKmm * self._K_dvar)
+        if self.ARD:
+            self.lengthscale.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None)
+        else:
+            self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm)
+
+    def gradients_X(self, dL_dK, X, X2, target):
+        #if self._X is None or X.base is not self._X.base or X2 is not None:
+        self._K_computations(X, X2)
+        if X2 is None:
+            _K_dist = 2*(X[:, None, :] - X[None, :, :])
+        else:
+            _K_dist = X[:, None, :] - X2[None, :, :]  # don't cache this in _K_computations because it is high memory. If this function is being called, chances are we're not in the high memory arena.
+        dK_dX = (-self.variance / self.lengthscale2) * np.transpose(self._K_dvar[:, :, np.newaxis] * _K_dist, (1, 0, 2))
+        target += np.sum(dK_dX * dL_dK.T[:, :, None], 0)
+
+    def dKdiag_dX(self, dL_dKdiag, X, target):
+        pass
+
+    #---------------------------------------#
+    #             PSI statistics            #
+    #---------------------------------------#
+
+    def dpsi0_dmuS(self, dL_dpsi0, Z, mu, S, target_mu, target_S):
+        pass
+
+    def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, target):
+        self._psi_computations(Z, mu, S)
+        denominator = (self.lengthscale2 * (self._psi1_denom))
+        dpsi1_dZ = -self._psi1[:, :, None] * ((self._psi1_dist / denominator))
+        target += np.sum(dL_dpsi1[:, :, None] * dpsi1_dZ, 0)
+
+    def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S, target_mu, target_S):
+        self._psi_computations(Z, mu, S)
+        tmp = self._psi1[:, :, None] / self.lengthscale2 / self._psi1_denom
+        target_mu += np.sum(dL_dpsi1[:, :, None] * tmp * self._psi1_dist, 1)
+        target_S += np.sum(dL_dpsi1[:, :, None] * 0.5 * tmp * (self._psi1_dist_sq - 1), 1)
+
+    def dpsi2_dZ(self, dL_dpsi2, Z, mu, S, target):
+        self._psi_computations(Z, mu, S)
+        term1 = self._psi2_Zdist / self.lengthscale2  # num_inducing, num_inducing, input_dim
+        term2 = self._psi2_mudist / self._psi2_denom / self.lengthscale2  # N, num_inducing, num_inducing, input_dim
+        dZ = self._psi2[:, :, :, None] * (term1[None] + term2)
+        target += (dL_dpsi2[:, :, :, None] * dZ).sum(0).sum(0)
+
+    def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, target_mu, target_S):
+        """Think N, num_inducing, num_inducing, input_dim."""
+        self._psi_computations(Z, mu, S)
+        tmp = self._psi2[:, :, :, None] / self.lengthscale2 / self._psi2_denom
+        target_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1)
+        target_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1)
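+
+    # Editor's illustrative comment (not part of the original patch): the
+    # psi statistics handled above are expectations of the kernel under the
+    # variational posterior q(x_n) = N(mu_n, S_n):
+    #     psi0         = E_q[k(x_n, x_n)]                 (scalar per point)
+    #     psi1[n,m]    = E_q[k(x_n, z_m)]                 (N x M)
+    #     psi2[n,m,m'] = E_q[k(x_n, z_m) k(x_n, z_m')]    (N x M x M)
+    # For the RBF kernel these expectations are analytic; _psi_computations
+    # caches them together with the intermediate distance terms
+    # (_psi1_dist, _psi2_mudist, ...) reused by the gradient methods.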
+
+    #---------------------------------------#
+    #            Precomputations            #
+    #---------------------------------------#
+
+    def _K_computations(self, X, X2):
+        #params = self._get_params()
+        if not (fast_array_equal(X, self._X) and fast_array_equal(X2, self._X2)):  # and fast_array_equal(self._params_save, params)):
+            #self._X = X.copy()
+            #self._params_save = params.copy()
+            if X2 is None:
+                self._X2 = None
+                X = X / self.lengthscale
+                Xsquare = np.sum(np.square(X), 1)
+                self._K_dist2 = -2.*tdot(X) + (Xsquare[:, None] + Xsquare[None, :])
+            else:
+                self._X2 = X2.copy()
+                X = X / self.lengthscale
+                X2 = X2 / self.lengthscale
+                self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])
+            self._K_dvar = np.exp(-0.5 * self._K_dist2)
+
+    def _dL_dlengthscales_via_K(self, dL_dK, X, X2):
+        """
+        A helper function for the update_gradients_* methods.
+
+        Computes the derivative of the objective L wrt the lengthscales via
+
+            dL_dl = sum_{i,j}(dL_dK_{ij} dK_dl)
+
+        Assumes self._K_computations has just been called.
+
+        This is only valid if self.ARD is True.
+        """
+        target = np.zeros(self.input_dim)
+        dvardLdK = self._K_dvar * dL_dK
+        var_len3 = self.variance / np.power(self.lengthscale, 3)
+        if X2 is None:
+            # save computation for the symmetrical case
+            dvardLdK = dvardLdK + dvardLdK.T
+            code = """
+            int q, i, j;
+            double tmp;
+            for(q=0; q<input_dim; q++){

[... garbled in the source: the rest of this C kernel, the remainder of
     _dL_dlengthscales_via_K, and the start of the weave-based psi2
     computation; the include targets below are reconstructed (OpenMP and
     math headers, implied by libraries=['gomp']) ...]

+        support_code = """
+        #include <omp.h>
+        #include <math.h>
+        """
+        weave.inline(code, support_code=support_code, libraries=['gomp'],
+                     arg_names=['N', 'num_inducing', 'input_dim', 'mu', 'Zhat', 'mudist_sq', 'mudist', 'lengthscale2', '_psi2_denom', 'psi2_Zdist_sq', 'psi2_exponent', 'half_log_psi2_denom', 'psi2', 'variance_sq'],
+                     type_converters=weave.converters.blitz, **self.weave_options)
+
+        return mudist, mudist_sq, psi2_exponent, psi2
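The varDTC code touched by patch 3 below collapses the per-point psi2 statistics weighted by the noise precision beta; with heteroscedastic noise every data point carries its own precision beta_n, so psi2_beta = sum_n beta_n * psi2[n]. A small self-contained numpy sketch of that reduction (shapes and names illustrative, not the patch's actual variables):

    import numpy as np

    N, M = 4, 3                        # data points, inducing points (illustrative)
    rng = np.random.RandomState(1)
    psi2 = rng.randn(N, M, M)          # per-point psi2 statistics, shape (N, M, M)
    beta = rng.rand(N)                 # heteroscedastic noise precisions

    # per-point weighting, as in the het_noise branch
    psi2_beta = (psi2 * beta.reshape(N, 1, 1)).sum(0)

    # the same reduction written as an einsum
    assert np.allclose(psi2_beta, np.einsum('n,nij->ij', beta, psi2))
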
From b2328c4f47ce3cd58d02d489a4843dded35f821b Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 5 Feb 2014 10:48:23 +0000
Subject: [PATCH 3/3] starting varDTC with uncertain inputs [not working]

---
 GPy/core/gp.py                                    |  4 ++--
 .../latent_function_inference/varDTC.py           | 22 ++++++++++++-------
 GPy/kern/parts/rbf.py                             |  2 +-
 GPy/models/sparse_gp_regression.py                |  2 +-
 GPy/util/linalg.py                                | 11 ++++++++++
 5 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 6d9ed75d..b9239a03 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -185,7 +185,7 @@ class GP(Model):
         from ..plotting.matplot_dep import models_plots
         models_plots.plot_fit_f(self,*args,**kwargs)
 
-    def plot(self, *args):
+    def plot(self, *args, **kwargs):
         """
         Plot the posterior of the GP.
           - In one dimension, the function is plotted with a shaded region
@@ -204,7 +204,7 @@ class GP(Model):
         """
         assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
         from ..plotting.matplot_dep import models_plots
-        models_plots.plot_fit(self,*args)
+        models_plots.plot_fit(self,*args,**kwargs)

diff --git a/GPy/inference/latent_function_inference/varDTC.py b/GPy/inference/latent_function_inference/varDTC.py
index b5ba4c2d..290e234e 100644
--- a/GPy/inference/latent_function_inference/varDTC.py
+++ b/GPy/inference/latent_function_inference/varDTC.py
@@ -4,6 +4,7 @@
 from posterior import Posterior
 from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs
 import numpy as np
+from GPy.util.linalg import dtrtri
 log_2_pi = np.log(2*np.pi)
 
 class VarDTC(object):
@@ -69,19 +70,24 @@ class VarDTC(object):
             psi2_beta = (psi2 * (beta.flatten().reshape(num_data, 1, 1))).sum(0)
         else:
             psi2_beta = psi2.sum(0) * beta
-        evals, evecs = linalg.eigh(psi2_beta)
-        clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
-        if not np.array_equal(evals, clipped_evals):
-            pass # print evals
-        tmp = evecs * np.sqrt(clipped_evals)
-        tmp = tmp.T
+        if 0:
+            evals, evecs = linalg.eigh(psi2_beta)
+            clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
+            if not np.array_equal(evals, clipped_evals):
+                pass # print evals
+            tmp = evecs * np.sqrt(clipped_evals)
+            tmp = tmp.T
+            # no backsubstitution because of bound explosion on tr(A) if not...
+            LmInv, _ = dtrtri(Lm, lower=1)
+            A = LmInv.T.dot(psi2_beta.dot(LmInv))
+            print A.sum()
         else:
             if het_noise:
                 tmp = psi1 * (np.sqrt(beta.reshape(num_data, 1)))
             else:
                 tmp = psi1 * (np.sqrt(beta))
-        tmp, _ = dtrtrs(Lm, np.asfortranarray(tmp.T), lower=1)
-        A = tdot(tmp)
+            tmp, _ = dtrtrs(Lm, np.asfortranarray(tmp.T), lower=1)
+            A = tdot(tmp)
 
         # factor B
         B = np.eye(num_inducing) + A

diff --git a/GPy/kern/parts/rbf.py b/GPy/kern/parts/rbf.py
index 89f6894c..4247eb9c 100644
--- a/GPy/kern/parts/rbf.py
+++ b/GPy/kern/parts/rbf.py
@@ -159,7 +159,7 @@ class RBF(Kernpart):
         if self.ARD:
             self.lengthscales.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None)
         else:
-            self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK)
+            self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm)
 
     def gradients_X(self, dL_dK, X, X2, target):
         #if self._X is None or X.base is not self._X.base or X2 is not None:

diff --git a/GPy/models/sparse_gp_regression.py b/GPy/models/sparse_gp_regression.py
index 88b0d435..386380b7 100644
--- a/GPy/models/sparse_gp_regression.py
+++ b/GPy/models/sparse_gp_regression.py
@@ -43,7 +43,7 @@ class SparseGPRegression(SparseGP):
 
         likelihood = likelihoods.Gaussian()
 
-        SparseGP.__init__(self, X, Y, Z, kernel, likelihood)
+        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, X_variance=X_variance)
         self.ensure_default_constraints()
 
     def _getstate(self):

diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py
index b8c6a1df..44f3700d 100644
--- a/GPy/util/linalg.py
+++ b/GPy/util/linalg.py
@@ -41,6 +41,17 @@ else:
     _blas_available = False
     warnings.warn("warning: caught this exception:" + str(e))
 
+def dtrtri(L, lower=0):
+    """
+    Wrapper for the lapack dtrtri function.
+    Inverse of the triangular matrix L.
+
+    :param L: triangular matrix L
+    :param lower: is the matrix lower (True) or upper (False) triangular
+    :returns: Li, info
+    """
+    return lapack.dtrtri(L, lower=lower)
+
 def dtrtrs(A, B, lower=0, trans=0, unitdiag=0):
     """
     Wrapper for lapack dtrtrs function
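
The new dtrtri wrapper simply forwards to scipy's LAPACK binding. A quick sanity-check sketch of the call it wraps, using scipy.linalg.lapack directly since the wrapper itself is only importable once the patch is applied:

    import numpy as np
    from scipy.linalg import lapack

    # invert a lower-triangular Cholesky factor and check L^{-1} L = I
    L = np.linalg.cholesky(np.array([[4., 2.], [2., 3.]]))
    Li, info = lapack.dtrtri(L, lower=1)
    assert info == 0
    assert np.allclose(Li.dot(L), np.eye(2))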