diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 13336ef5..d8d1a87a 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -30,7 +30,10 @@ class GP(Model):
         super(GP, self).__init__(name)
 
         assert X.ndim == 2
-        self.X = ObservableArray(X)
+        if isinstance(X, ObservableArray):
+            self.X = X
+        else: self.X = ObservableArray(X)
+
         self.num_data, self.input_dim = self.X.shape
 
         assert Y.ndim == 2
diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py
index dffe2ed1..e8be0f77 100644
--- a/GPy/core/parameterization/array_core.py
+++ b/GPy/core/parameterization/array_core.py
@@ -28,7 +28,9 @@ class ObservableArray(np.ndarray, Observable):
     """
     __array_priority__ = -1 # Never give back ObservableArray
     def __new__(cls, input_array):
-        obj = np.atleast_1d(input_array).view(cls)
+        if not isinstance(input_array, ObservableArray):
+            obj = np.atleast_1d(input_array).view(cls)
+        else: obj = input_array
         cls.__name__ = "ObservableArray\n "
         return obj
 
diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py
index 5fe63052..d1c0faf8 100644
--- a/GPy/core/parameterization/variational.py
+++ b/GPy/core/parameterization/variational.py
@@ -3,21 +3,54 @@ Created on 6 Nov 2013
 
 @author: maxz
 '''
+
+import numpy as np
 from parameterized import Parameterized
 from param import Param
 from transformations import Logexp
 
-class Normal(Parameterized):
+class VariationalPrior(object):
+    def KL_divergence(self, variational_posterior):
+        raise NotImplementedError("override this for variational inference of latent space")
+
+    def update_gradients_KL(self, variational_posterior):
+        """
+        updates the gradients for mean and variance **in place**
+        """
+        raise NotImplementedError("override this for variational inference of latent space")
+
+class NormalPrior(VariationalPrior):
+    def KL_divergence(self, variational_posterior):
+        var_mean = np.square(variational_posterior.mean).sum()
+        var_S = (variational_posterior.variance - np.log(variational_posterior.variance)).sum()
+        return 0.5 * (var_mean + var_S) - 0.5 * variational_posterior.input_dim * variational_posterior.num_data
+
+    def update_gradients_KL(self, variational_posterior):
+        # dL:
+        variational_posterior.mean.gradient -= variational_posterior.mean
+        variational_posterior.variance.gradient -= (1. - (1. / (variational_posterior.variance))) * 0.5
+
+
+class VariationalPosterior(Parameterized):
+    def __init__(self, means=None, variances=None, name=None, **kw):
+        super(VariationalPosterior, self).__init__(name=name, **kw)
+        self.mean = Param("mean", means)
+        self.variance = Param("variance", variances, Logexp())
+        self.add_parameters(self.mean, self.variance)
+        self.num_data, self.input_dim = self.mean.shape
+        if self.has_uncertain_inputs():
+            assert self.variance.shape == self.mean.shape, "need one variance per sample and dimension"
+
+    def has_uncertain_inputs(self):
+        return self.variance is not None
+
+
+class NormalPosterior(VariationalPosterior):
     '''
-    Normal distribution for variational approximations.
+    NormalPosterior distribution for variational approximations.
 
     holds the means and variances for a factorizing multivariate normal distribution
     '''
-    def __init__(self, means, variances, name='latent space'):
-        Parameterized.__init__(self, name=name)
-        self.mean = Param("mean", means)
-        self.variance = Param('variance', variances, Logexp())
-        self.add_parameters(self.mean, self.variance)
 
     def plot(self, *args):
         """
@@ -30,8 +63,7 @@ class Normal(Parameterized):
         from ...plotting.matplot_dep import variational_plots
         return variational_plots.plot(self,*args)
 
-
-class SpikeAndSlab(Parameterized):
+class SpikeAndSlab(VariationalPosterior):
     '''
     The SpikeAndSlab distribution for variational approximations.
     '''
@@ -39,11 +71,9 @@ class SpikeAndSlab(Parameterized):
         """
         binary_prob : the probability of the distribution on the slab part.
         """
-        Parameterized.__init__(self, name=name)
-        self.mean = Param("mean", means)
-        self.variance = Param('variance', variances, Logexp())
+        super(SpikeAndSlab, self).__init__(means, variances, name)
        self.gamma = Param("binary_prob",binary_prob,)
-        self.add_parameters(self.mean, self.variance, self.gamma)
+        self.add_parameter(self.gamma)
 
     def plot(self, *args):
         """
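Review note: `NormalPrior.KL_divergence` above groups the mean and variance terms and folds the constant into `0.5 * input_dim * num_data`. A minimal numpy-only check that this grouping matches the elementwise KL between a factorized N(mu, S) posterior and a standard normal prior (illustrative arrays, not GPy parameters):

```python
import numpy as np

mu = np.random.randn(4, 3)                     # illustrative means (num_data x input_dim)
S = np.random.uniform(0.1, 1.0, mu.shape)      # illustrative variances

# elementwise KL( N(mu, S) || N(0, 1) ), summed over samples and dimensions
kl_elementwise = 0.5 * np.sum(np.square(mu) + S - np.log(S) - 1.0)

# the grouped form implemented in NormalPrior.KL_divergence
kl_grouped = 0.5 * (np.square(mu).sum() + (S - np.log(S)).sum()) - 0.5 * mu.size

assert np.allclose(kl_elementwise, kl_grouped)
```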
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 71053867..37f2baf8 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -5,8 +5,9 @@ import numpy as np
 from ..util.linalg import mdot
 from gp import GP
 from parameterization.param import Param
-from GPy.inference.latent_function_inference import var_dtc
+from ..inference.latent_function_inference import var_dtc
 from .. import likelihoods
+from parameterization.variational import NormalPosterior
 
 class SparseGP(GP):
     """
@@ -45,16 +46,14 @@ class SparseGP(GP):
         self.Z = Param('inducing inputs', Z)
         self.num_inducing = Z.shape[0]
 
-        self.X_variance = X_variance
-        if self.has_uncertain_inputs():
-            assert X_variance.shape == X.shape
+        self.q = NormalPosterior(X, X_variance)
 
-        GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name)
+        GP.__init__(self, self.q.mean, Y, kernel, likelihood, inference_method=inference_method, name=name)
 
         self.add_parameter(self.Z, index=0)
         self.parameters_changed()
 
     def has_uncertain_inputs(self):
-        return not (self.X_variance is None)
+        return self.q.has_uncertain_inputs()
 
     def parameters_changed(self):
         if self.has_uncertain_inputs():
@@ -81,7 +80,8 @@ class SparseGP(GP):
                 var = Kxx - mdot(Kx.T, self.posterior.woodbury_inv, Kx)
             else:
                 Kxx = self.kern.Kdiag(Xnew)
-                var = Kxx - np.sum(Kx * np.dot(self.posterior.woodbury_inv, Kx), 0)
+                WKx = np.tensordot(np.atleast_3d(self.posterior.woodbury_inv), Kx, [0,0])
+                var = Kxx - np.sum(Kx * WKx, 0)
         else:
             Kx = self.kern.psi1(self.Z, Xnew, X_variance_new)
             mu = np.dot(Kx, self.Cpsi1V)
diff --git a/GPy/inference/latent_function_inference/posterior.py b/GPy/inference/latent_function_inference/posterior.py
index 73741a13..a996e1df 100644
--- a/GPy/inference/latent_function_inference/posterior.py
+++ b/GPy/inference/latent_function_inference/posterior.py
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import numpy as np
-from ...util.linalg import pdinv, dpotrs, tdot, dtrtrs, dpotri, symmetrify, jitchol, dtrtri
+from ...util.linalg import pdinv, dpotrs, dpotri, symmetrify, jitchol
 
 class Posterior(object):
     """
@@ -83,14 +83,15 @@ class Posterior(object):
             #LiK, _ = dtrtrs(self.woodbury_chol, self._K, lower=1)
             self._covariance = np.tensordot(np.dot(np.atleast_3d(self.woodbury_inv).T, self._K), self._K, [1,0]).T
             #self._covariance = self._K - self._K.dot(self.woodbury_inv).dot(self._K)
-        return self._covariance
+        return self._covariance.squeeze()
 
     @property
     def precision(self):
         if self._precision is None:
-            self._precision = np.zeros(np.atleast_3d(self.covariance).shape) # if one covariance per dimension
-            for p in xrange(self.covariance.shape[-1]):
-                self._precision[:,:,p] = pdinv(self.covariance[:,:,p])[0]
+            cov = np.atleast_3d(self.covariance)
+            self._precision = np.zeros(cov.shape) # if one covariance per dimension
+            for p in xrange(cov.shape[-1]):
+                self._precision[:,:,p] = pdinv(cov[:,:,p])[0]
         return self._precision
 
     @property
@@ -98,7 +99,10 @@ class Posterior(object):
         if self._woodbury_chol is None:
             #compute woodbury chol from
             if self._woodbury_inv is not None:
-                _, _, self._woodbury_chol, _ = pdinv(self._woodbury_inv)
+                winv = np.atleast_3d(self._woodbury_inv)
+                self._woodbury_chol = np.zeros(winv.shape)
+                for p in xrange(winv.shape[-1]):
+                    self._woodbury_chol[:,:,p] = pdinv(winv[:,:,p])[2]
             #Li = jitchol(self._woodbury_inv)
             #self._woodbury_chol, _ = dtrtri(Li)
             #W, _, _, _, = pdinv(self._woodbury_inv)
@@ -132,7 +136,7 @@ class Posterior(object):
     @property
     def K_chol(self):
         if self._K_chol is None:
-            self._K_chol = dportf(self._K)
+            self._K_chol = jitchol(self._K)
         return self._K_chol
 
 
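Review note: the `precision` and `woodbury_chol` properties above now follow the same pattern: promote a single (M, M) matrix to an (M, M, P) stack with `np.atleast_3d` and invert or factorize one slice per output dimension. A rough numpy-only sketch of that loop, with `np.linalg.inv` standing in for the role `pdinv` plays in the diff:

```python
import numpy as np

def per_dim_inverse(cov):
    """Invert an (M, M) matrix or an (M, M, P) stack one output dimension at a time."""
    cov = np.atleast_3d(cov)                 # (M, M) becomes (M, M, 1)
    out = np.zeros(cov.shape)
    for p in range(cov.shape[-1]):
        out[:, :, p] = np.linalg.inv(cov[:, :, p])
    return out

A = 2.0 * np.eye(3)
print(per_dim_inverse(A)[:, :, 0])           # 0.5 * I, same as inverting A directly
```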
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py
index 3ef231b3..8bd9b6d1 100644
--- a/GPy/kern/_src/kern.py
+++ b/GPy/kern/_src/kern.py
@@ -127,11 +127,12 @@ from GPy.core.model import Model
 class Kern_check_model(Model):
     """This is a dummy model class used as a base class for checking that the gradients of a given kernel are implemented correctly. It enables checkgradient() to be called independently on a kernel."""
     def __init__(self, kernel=None, dL_dK=None, X=None, X2=None):
+        from GPy.kern import RBF
         Model.__init__(self, 'kernel_test_model')
         num_samples = 20
         num_samples2 = 10
         if kernel==None:
-            kernel = GPy.kern.rbf(1)
+            kernel = RBF(1)
         if X==None:
             X = np.random.randn(num_samples, kernel.input_dim)
         if dL_dK==None:
diff --git a/GPy/kern/_src/linear.py b/GPy/kern/_src/linear.py
index 61a1dbd3..a66b3705 100644
--- a/GPy/kern/_src/linear.py
+++ b/GPy/kern/_src/linear.py
@@ -106,51 +106,52 @@ class Linear(Kern):
     # variational                           #
     #---------------------------------------#
 
-    def psi0(self, Z, mu, S):
-        return np.sum(self.variances * self._mu2S(mu, S), 1)
+    def psi0(self, Z, posterior_variational):
+        return np.sum(self.variances * self._mu2S(posterior_variational), 1)
 
-    def psi1(self, Z, mu, S):
-        return self.K(mu, Z) #the variance, it does nothing
+    def psi1(self, Z, posterior_variational):
+        return self.K(posterior_variational.mean, Z) #the variance, it does nothing
 
-    def psi2(self, Z, mu, S):
+    def psi2(self, Z, posterior_variational):
         ZA = Z * self.variances
-        ZAinner = self._ZAinner(mu, S, Z)
+        ZAinner = self._ZAinner(posterior_variational, Z)
         return np.dot(ZAinner, ZA.T)
 
-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z):
+        mu, S = posterior_variational.mean, posterior_variational.variance
         # psi0:
-        tmp = dL_dpsi0[:, None] * self._mu2S(mu, S)
+        tmp = dL_dpsi0[:, None] * self._mu2S(posterior_variational)
         if self.ARD:
             grad = tmp.sum(0)
         else:
             grad = np.atleast_1d(tmp.sum())
         #psi1
         self.update_gradients_full(dL_dpsi1, mu, Z)
         grad += self.variances.gradient
         #psi2
-        tmp = dL_dpsi2[:, :, :, None] * (self._ZAinner(mu, S, Z)[:, :, None, :] * (2. * Z)[None, None, :, :])
+        tmp = dL_dpsi2[:, :, :, None] * (self._ZAinner(posterior_variational, Z)[:, :, None, :] * (2. * Z)[None, None, :, :])
         if self.ARD:
             grad += tmp.sum(0).sum(0).sum(0)
         else:
             grad += tmp.sum()
         #from Kmm
         self.update_gradients_full(dL_dKmm, Z, None)
         self.variances.gradient += grad
 
-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z):
         # Kmm
         grad = self.gradients_X(dL_dKmm, Z, None)
         #psi1
-        grad += self.gradients_X(dL_dpsi1.T, Z, mu)
+        grad += self.gradients_X(dL_dpsi1.T, Z, posterior_variational.mean)
         #psi2
-        self._weave_dpsi2_dZ(dL_dpsi2, Z, mu, S, grad)
+        self._weave_dpsi2_dZ(dL_dpsi2, Z, posterior_variational, grad)
         return grad
 
-    def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        grad_mu, grad_S = np.zeros(mu.shape), np.zeros(mu.shape)
+    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z):
+        grad_mu, grad_S = np.zeros(posterior_variational.mean.shape), np.zeros(posterior_variational.mean.shape)
         # psi0
-        grad_mu += dL_dpsi0[:, None] * (2.0 * mu * self.variances)
+        grad_mu += dL_dpsi0[:, None] * (2.0 * posterior_variational.mean * self.variances)
         grad_S += dL_dpsi0[:, None] * self.variances
         # psi1
         grad_mu += (dL_dpsi1[:, :, None] * (Z * self.variances)).sum(1)
         # psi2
-        self._weave_dpsi2_dmuS(dL_dpsi2, Z, mu, S, grad_mu, grad_S)
+        self._weave_dpsi2_dmuS(dL_dpsi2, Z, posterior_variational, grad_mu, grad_S)
         return grad_mu, grad_S
 
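Review note: in the hunk above, `psi2` for the linear kernel is built from `_ZAinner`, i.e. (Z * variances) applied to the second moment E[x x^T] = mu mu^T + diag(S) of each uncertain input (the helper itself is refactored in a later hunk of this file). A small numpy-only sanity check of that building block with made-up shapes:

```python
import numpy as np

N, M, Q = 4, 3, 2                                    # data points, inducing points, input dim
mu = np.random.randn(N, Q)
S = np.random.uniform(0.1, 1.0, (N, Q))
Z = np.random.randn(M, Q)
variances = np.random.uniform(0.5, 2.0, Q)

# the computation performed by Linear._ZAinner
ZA = Z * variances
inner = mu[:, None, :] * mu[:, :, None]              # (N, Q, Q) outer products
idx = np.diag_indices(Q, 2)
inner[:, idx[0], idx[1]] += S                         # add the variances on the diagonal
ZAinner = np.dot(ZA, inner).swapaxes(0, 1)            # (N, M, Q)

# check one data point against the direct formula (Z * A) E[x x^T]
n = 0
expected = ZA.dot(np.outer(mu[n], mu[n]) + np.diag(S[n]))
assert np.allclose(ZAinner[n], expected)
```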
@@ -159,7 +160,7 @@ class Linear(Kern):
     #--------------------------------------------------#
 
 
-    def _weave_dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, target_mu, target_S):
+    def _weave_dpsi2_dmuS(self, dL_dpsi2, Z, pv, target_mu, target_S):
         # Think N,num_inducing,num_inducing,input_dim
         ZA = Z * self.variances
         AZZA = ZA.T[:, None, :, None] * ZA[None, :, None, :]
@@ -202,15 +203,16 @@ class Linear(Kern):
         weave_options = {'headers'           : ['<omp.h>'],
                          'extra_compile_args': ['-fopenmp -O3'], #-march=native'],
                          'extra_link_args'   : ['-lgomp']}
-
+
+        mu = pv.mean
         N,num_inducing,input_dim,mu = mu.shape[0],Z.shape[0],mu.shape[1],param_to_array(mu)
         weave.inline(code, support_code=support_code, libraries=['gomp'],
                      arg_names=['N','num_inducing','input_dim','mu','AZZA','AZZA_2','target_mu','target_S','dL_dpsi2'],
                      type_converters=weave.converters.blitz,**weave_options)
 
-    def _weave_dpsi2_dZ(self, dL_dpsi2, Z, mu, S, target):
-        AZA = self.variances*self._ZAinner(mu, S, Z)
+    def _weave_dpsi2_dZ(self, dL_dpsi2, Z, pv, target):
+        AZA = self.variances*self._ZAinner(pv, Z)
         code="""
         int n,m,mm,q;
         #pragma omp parallel for private(n,mm,q)
@@ -232,21 +234,21 @@ class Linear(Kern):
                          'extra_compile_args': ['-fopenmp -O3'], #-march=native'],
                          'extra_link_args'   : ['-lgomp']}
 
-        N,num_inducing,input_dim = mu.shape[0],Z.shape[0],mu.shape[1]
-        mu = param_to_array(mu)
+        N,num_inducing,input_dim = pv.mean.shape[0],Z.shape[0],pv.mean.shape[1]
+        mu = param_to_array(pv.mean)
         weave.inline(code, support_code=support_code, libraries=['gomp'],
                      arg_names=['N','num_inducing','input_dim','AZA','target','dL_dpsi2'],
                      type_converters=weave.converters.blitz,**weave_options)
 
-    def _mu2S(self, mu, S):
-        return np.square(mu) + S
+    def _mu2S(self, pv):
+        return np.square(pv.mean) + pv.variance
 
-    def _ZAinner(self, mu, S, Z):
+    def _ZAinner(self, pv, Z):
         ZA = Z*self.variances
-        inner = (mu[:, None, :] * mu[:, :, None])
-        diag_indices = np.diag_indices(mu.shape[1], 2)
-        inner[:, diag_indices[0], diag_indices[1]] += S
+        inner = (pv.mean[:, None, :] * pv.mean[:, :, None])
+        diag_indices = np.diag_indices(pv.mean.shape[1], 2)
+        inner[:, diag_indices[0], diag_indices[1]] += pv.variance
         return np.dot(ZA, inner).swapaxes(0, 1) # NOTE: self.ZAinner \in [num_inducing x N x input_dim]!
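Review note: a hypothetical usage sketch of the refactored psi-statistics interface, in which the kernel receives a single variational posterior object instead of separate mean and variance arrays. The import paths follow this diff, but the `Linear(input_dim)` constructor and whether these modules import cleanly on this branch are assumptions, not verified here:

```python
import numpy as np
from GPy.kern._src.linear import Linear                     # class refactored in this diff
from GPy.core.parameterization.variational import NormalPosterior

N, M, Q = 10, 3, 2
q = NormalPosterior(np.random.randn(N, Q),                  # means
                    np.random.uniform(0.01, 0.1, (N, Q)))   # variances
Z = np.random.randn(M, Q)                                    # inducing inputs
k = Linear(Q)                                                # assumed constructor signature

psi0 = k.psi0(Z, q)    # expected: one value per data point, shape (N,)
psi1 = k.psi1(Z, q)    # expected shape (N, M)
psi2 = k.psi2(Z, q)    # expected shape (N, M, M)
print(psi0.shape, psi1.shape, psi2.shape)
```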
diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py
index 7cc2e695..a6ff9424 100644
--- a/GPy/kern/_src/stationary.py
+++ b/GPy/kern/_src/stationary.py
@@ -18,7 +18,7 @@ class Stationary(Kern):
                 lengthscale = np.ones(1)
             else:
                 lengthscale = np.asarray(lengthscale)
-                assert lengthscale.size == 1 "Only lengthscale needed for non-ARD kernel"
+                assert lengthscale.size == 1, "Only lengthscale needed for non-ARD kernel"
         else:
             if lengthscale is not None:
                 lengthscale = np.asarray(lengthscale)
diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py
index cc68de68..7b09e0b1 100644
--- a/GPy/models/bayesian_gplvm.py
+++ b/GPy/models/bayesian_gplvm.py
@@ -8,7 +8,7 @@ from ..core import SparseGP
 from ..likelihoods import Gaussian
 from ..inference.optimization import SCG
 from ..util import linalg
-from ..core.parameterization.variational import Normal
+from ..core.parameterization.variational import NormalPosterior, NormalPrior
 
 class BayesianGPLVM(SparseGP, GPLVM):
     """
@@ -29,7 +29,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         self.init = init
 
         if X_variance is None:
-            X_variance = np.clip((np.ones_like(X) * 0.5) + .01 * np.random.randn(*X.shape), 0.001, 1)
+            X_variance = np.random.uniform(0,.1,X.shape)
 
         if Z is None:
             Z = np.random.permutation(X.copy())[:num_inducing]
@@ -40,7 +40,9 @@ class BayesianGPLVM(SparseGP, GPLVM):
         if likelihood is None:
             likelihood = Gaussian()
 
-        self.q = Normal(X, X_variance)
+        self.q = NormalPosterior(X, X_variance)
+        self.variational_prior = NormalPrior()
+
         SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, X_variance, name, **kwargs)
         self.add_parameter(self.q, index=0)
         #self.ensure_default_constraints()
@@ -57,24 +59,17 @@ class BayesianGPLVM(SparseGP, GPLVM):
         self.init = state.pop()
         SparseGP._setstate(self, state)
 
-    def KL_divergence(self):
-        var_mean = np.square(self.X).sum()
-        var_S = np.sum(self.X_variance - np.log(self.X_variance))
-        return 0.5 * (var_mean + var_S) - 0.5 * self.input_dim * self.num_data
-
     def parameters_changed(self):
         super(BayesianGPLVM, self).parameters_changed()
-
-        self._log_marginal_likelihood -= self.KL_divergence()
-        dL_dmu, dL_dS = self.kern.gradients_q_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
-
-        # dL:
-        self.q.mean.gradient = dL_dmu
-        self.q.variance.gradient = dL_dS
-
-        # dKL:
-        self.q.mean.gradient -= self.X
-        self.q.variance.gradient -= (1. - (1. / (self.X_variance))) * 0.5
+        self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.q)
+
+        # TODO: This has to go into kern
+        # maybe a update_gradients_q_variational?
+        self.q.mean.gradient, self.q.variance.gradient = self.kern.gradients_q_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
+
+        # update for the KL divergence
+        self.variational_prior.update_gradients_KL(self.q)
+
 
     def plot_latent(self, plot_inducing=True, *args, **kwargs):
         """
@@ -147,6 +142,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         """
         See GPy.plotting.matplot_dep.dim_reduction_plots.plot_steepest_gradient_map
         """
+        import sys
         assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
         from ..plotting.matplot_dep import dim_reduction_plots
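Review note: `parameters_changed` now lets the kernel write the likelihood gradients for the variational parameters and then subtracts the KL gradients via `NormalPrior.update_gradients_KL`, which uses dKL/dmu = mu and dKL/dS = 0.5 * (1 - 1/S). A quick numpy-only finite-difference check of the variance gradient (illustrative values, no GPy objects):

```python
import numpy as np

def kl(mu, S):
    """KL( N(mu, S) || N(0, 1) ) summed over all entries, as in NormalPrior."""
    return 0.5 * np.sum(np.square(mu) + S - np.log(S) - 1.0)

mu = np.random.randn(5, 2)
S = np.random.uniform(0.1, 1.0, mu.shape)
eps = 1e-6
i, j = 1, 0

S_plus, S_minus = S.copy(), S.copy()
S_plus[i, j] += eps
S_minus[i, j] -= eps

numerical = (kl(mu, S_plus) - kl(mu, S_minus)) / (2 * eps)
analytic = 0.5 * (1.0 - 1.0 / S[i, j])       # the term update_gradients_KL subtracts
assert np.allclose(numerical, analytic, atol=1e-5)
```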