diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index d8d1a87a..81985773 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -11,6 +11,7 @@
 from parameterization import ObservableArray
 from .. import likelihoods
 from ..likelihoods.gaussian import Gaussian
 from ..inference.latent_function_inference import exact_gaussian_inference
+from parameterization.variational import VariationalPosterior
 
 class GP(Model):
     """
@@ -30,10 +31,10 @@ class GP(Model):
         super(GP, self).__init__(name)
 
         assert X.ndim == 2
-        if isinstance(X, ObservableArray):
-            self.X = self.X = X
+        if isinstance(X, ObservableArray) or isinstance(X, VariationalPosterior):
+            self.X = X
         else:
             self.X = ObservableArray(X)
-
+
         self.num_data, self.input_dim = self.X.shape
 
         assert Y.ndim == 2
diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py
index d1c0faf8..c51a8021 100644
--- a/GPy/core/parameterization/variational.py
+++ b/GPy/core/parameterization/variational.py
@@ -9,7 +9,10 @@
 from parameterized import Parameterized
 from param import Param
 from transformations import Logexp
 
-class VariationalPrior(object):
+class VariationalPrior(Parameterized):
+    def __init__(self, name=None, **kw):
+        super(VariationalPrior, self).__init__(name=name, **kw)
+
     def KL_divergence(self, variational_posterior):
         raise NotImplementedError, "override this for variational inference of latent space"
 
@@ -19,7 +22,7 @@ class VariationalPrior(object):
         """
         raise NotImplementedError, "override this for variational inference of latent space"
 
-class NormalPrior(VariationalPrior):
+class NormalPrior(VariationalPrior):
     def KL_divergence(self, variational_posterior):
         var_mean = np.square(variational_posterior.mean).sum()
         var_S = (variational_posterior.variance - np.log(variational_posterior.variance)).sum()
@@ -30,11 +33,39 @@ class NormalPrior(VariationalPrior):
         variational_posterior.mean.gradient -= variational_posterior.mean
         variational_posterior.variance.gradient -= (1. - (1. / (variational_posterior.variance))) * 0.5
 
+class SpikeAndSlabPrior(VariationalPrior):
+    def __init__(self, variance=1.0, pi=0.5, name='SpikeAndSlabPrior', **kw):
+        super(SpikeAndSlabPrior, self).__init__(name=name, **kw)
+        assert variance == 1.0, "Not Implemented!"
+        self.pi = Param('pi', pi)
+        self.variance = Param('variance', variance)
+        self.add_parameters(self.pi, self.variance)
+
+    def KL_divergence(self, variational_posterior):
+        mu = variational_posterior.mean
+        S = variational_posterior.variance
+        gamma = variational_posterior.binary_prob
+        var_mean = np.square(mu)
+        var_S = (S - np.log(S))
+        var_gamma = (gamma*np.log(gamma/self.pi)).sum()+((1-gamma)*np.log((1-gamma)/(1-self.pi))).sum()
+        return var_gamma + 0.5 * (gamma * (var_mean + var_S - 1)).sum()
+
+    def update_gradients_KL(self, variational_posterior):
+        mu = variational_posterior.mean
+        S = variational_posterior.variance
+        gamma = variational_posterior.binary_prob
+
+        gamma.gradient -= np.log((1-self.pi)/self.pi*gamma/(1.-gamma))+(np.square(mu)+S-np.log(S)-1.)/2.
+        mu.gradient -= gamma*mu
+        S.gradient -= (1. - (1. / S)) * gamma / 2.
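For reference, the KL term implemented by `SpikeAndSlabPrior.KL_divergence` above can be sanity-checked against a standalone NumPy sketch (illustrative only, not part of the patch; `spike_and_slab_KL` is a hypothetical name, `mu`, `S` and `gamma` are the posterior mean, variance and slab probability, and the prior slab has unit variance, as asserted in `__init__`):

    import numpy as np

    def spike_and_slab_KL(mu, S, gamma, pi=0.5):
        # Bernoulli part: posterior inclusion probabilities gamma against pi
        kl_bern = (gamma * np.log(gamma / pi)).sum() \
                + ((1. - gamma) * np.log((1. - gamma) / (1. - pi))).sum()
        # Gaussian part: N(mu, S) against the unit-variance slab, weighted by gamma
        kl_gauss = 0.5 * (gamma * (np.square(mu) + S - np.log(S) - 1.)).sum()
        return kl_bern + kl_gauss

    # e.g. mu = 0, S = 1, gamma = pi = 0.5 gives KL = 0, as expected.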
+ class VariationalPosterior(Parameterized): def __init__(self, means=None, variances=None, name=None, **kw): super(VariationalPosterior, self).__init__(name=name, **kw) self.mean = Param("mean", means) + self.ndim = self.mean.ndim + self.shape = self.mean.shape self.variance = Param("variance", variances, Logexp()) self.add_parameters(self.mean, self.variance) self.num_data, self.input_dim = self.mean.shape @@ -63,7 +94,7 @@ class NormalPosterior(VariationalPosterior): from ...plotting.matplot_dep import variational_plots return variational_plots.plot(self,*args) -class SpikeAndSlab(VariationalPosterior): +class SpikeAndSlabPosterior(VariationalPosterior): ''' The SpikeAndSlab distribution for variational approximations. ''' @@ -71,7 +102,7 @@ class SpikeAndSlab(VariationalPosterior): """ binary_prob : the probability of the distribution on the slab part. """ - super(SpikeAndSlab, self).__init__(means, variances, name) + super(SpikeAndSlabPosterior, self).__init__(means, variances, name) self.gamma = Param("binary_prob",binary_prob,) self.add_parameter(self.gamma) diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index dfe2d38f..b1a3c12e 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -45,34 +45,47 @@ class SparseGP(GP): self.Z = Param('inducing inputs', Z) self.num_inducing = Z.shape[0] - - if not (X_variance is None): - self.q = NormalPosterior(X, X_variance) - GP.__init__(self, self.q.mean, Y, kernel, likelihood, inference_method=inference_method, name=name) - else: - self.X = X - GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name) + + GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name) self.add_parameter(self.Z, index=0) self.parameters_changed() def has_uncertain_inputs(self): - return hasattr(self, 'q') - + return isinstance(self.X, VariationalPosterior) + def parameters_changed(self): - if self.has_uncertain_inputs(): - self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference_latent(self.kern, self.q, self.Z, self.likelihood, self.Y) - # gradients - self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood')) - self.kern.update_gradients_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict) - self.Z.gradient = self.kern.gradients_Z_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict) + self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y) + self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood')) + if isinstance(self.X, VariationalPosterior): + #gradients wrt kernel + dL_dKmm = self.grad_dict.pop('dL_dKmm') + self.kern.update_gradients_full(dL_dKmm, self.Z, None) + target = np.zeros(self.kern.size) + self.kern._collect_gradient(target) + self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict) + self.kern._collect_gradient(target) + self.kern._set_gradient(target) + + #gradients wrt Z + self.Z.gradient = self.kern.gradients_X(dL_dKmm, self.Z) + self.Z.gradient += self.kern.gradients_Z_expectations( + self.grad_dict['dL_dpsi1'], self.grad_dict['dL_dpsi2'], Z=self.Z, variational_posterior=self.X) else: - self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, None, self.Z, self.likelihood, self.Y) - # gradients - 
self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood')) - self.kern.update_gradients_sparse(X=self.X, Z=self.Z, **self.grad_dict) - self.Z.gradient = self.kern.gradients_Z_sparse(X=self.X, Z=self.Z, **self.grad_dict) - + #gradients wrt kernel + target = np.zeros(self.kern.size) + self.kern.update_gradients_diag(self.grad_dict['dL_dKdiag'], self.X) + self.kern._collect_gradient(target) + self.kern.update_gradients_full(self.grad_dict['dL_dKnm'], self.X, self.Z) + self.kern._collect_gradient(target) + self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z, None) + self.kern._collect_gradient(target) + self.kern._set_gradient(target) + + #gradients wrt Z + self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z) + self.Z.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X) + def _raw_predict(self, Xnew, X_variance_new=None, full_cov=False): """ Make a prediction for the latent function values diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index 4345cb90..fc0bc085 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -274,6 +274,7 @@ def bgplvm_simulation(optimize=True, verbose=1, _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) Y = Ylist[0] k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) + #k = kern.RBF(Q, ARD=True, lengthscale=10.) m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k) if optimize: @@ -281,7 +282,7 @@ def bgplvm_simulation(optimize=True, verbose=1, m.optimize('bfgs', messages=verbose, max_iters=max_iters, gtol=.05) if plot: - m.q.plot("BGPLVM Latent Space 1D") + m.X.plot("BGPLVM Latent Space 1D") m.kern.plot_ARD('BGPLVM Simulation ARD Parameters') return m @@ -302,7 +303,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1, m = BayesianGPLVM(Y.copy(), Q, init="random", num_inducing=num_inducing, kernel=k) m.inference_method = VarDTCMissingData() m.Y[inan] = _np.nan - m.q.variance *= .1 + m.X.variance *= .1 m.parameters_changed() m.Yreal = Y @@ -311,7 +312,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1, m.optimize('bfgs', messages=verbose, max_iters=max_iters, gtol=.05) if plot: - m.q.plot("BGPLVM Latent Space 1D") + m.X.plot("BGPLVM Latent Space 1D") m.kern.plot_ARD('BGPLVM Simulation ARD Parameters') return m diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 5cac1857..aa6bbbf9 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -16,7 +16,7 @@ def olympic_marathon_men(optimize=True, plot=True): m = GPy.models.GPRegression(data['X'], data['Y']) # set the lengthscale to be something sensible (defaults to 1) - m['rbf_lengthscale'] = 10 + m.kern.lengthscale = 10. if optimize: m.optimize('bfgs', max_iters=200) @@ -41,11 +41,10 @@ def coregionalization_toy2(optimize=True, plot=True): Y = np.vstack((Y1, Y2)) #build the kernel - k1 = GPy.kern.RBF(1) + GPy.kern.bias(1) - k2 = GPy.kern.coregionalize(2,1) + k1 = GPy.kern.RBF(1) + GPy.kern.Bias(1) + k2 = GPy.kern.Coregionalize(2,1) k = k1**k2 m = GPy.models.GPRegression(X, Y, kernel=k) - m.constrain_fixed('.*rbf_var', 1.) 
if optimize: m.optimize('bfgs', max_iters=100) @@ -86,11 +85,13 @@ def coregionalization_sparse(optimize=True, plot=True): """ #fetch the data from the non sparse examples m = coregionalization_toy2(optimize=False, plot=False) - X, Y = m.X, m.likelihood.Y + X, Y = m.X, m.Y + + k = GPy.kern.RBF(1)**GPy.kern.Coregionalize(2) #construct a model - m = GPy.models.SparseGPRegression(X,Y) - m.constrain_fixed('iip_\d+_1') # don't optimize the inducing input indexes + m = GPy.models.SparseGPRegression(X,Y, num_inducing=25, kernel=k) + m.Z[:,1].fix() # don't optimize the inducing input indexes if optimize: m.optimize('bfgs', max_iters=100, messages=1) @@ -128,7 +129,7 @@ def epomeo_gpx(max_iters=200, optimize=True, plot=True): np.random.randint(0, 4, num_inducing)[:, None])) k1 = GPy.kern.RBF(1) - k2 = GPy.kern.coregionalize(output_dim=5, rank=5) + k2 = GPy.kern.Coregionalize(output_dim=5, rank=5) k = k1**k2 m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True) @@ -322,7 +323,7 @@ def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1) else: kernel = GPy.kern.RBF(X.shape[1], ARD=1) - kernel += GPy.kern.White(X.shape[1]) + GPy.kern.bias(X.shape[1]) + kernel += GPy.kern.White(X.shape[1]) + GPy.kern.Bias(X.shape[1]) m = GPy.models.GPRegression(X, Y, kernel) # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25 # m.set_prior('.*lengthscale',len_prior) @@ -361,7 +362,7 @@ def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, o kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1) else: kernel = GPy.kern.RBF(X.shape[1], ARD=1) - #kernel += GPy.kern.bias(X.shape[1]) + #kernel += GPy.kern.Bias(X.shape[1]) X_variance = np.ones(X.shape) * 0.5 m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance) # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25 diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index cc630bfa..66ab3cbe 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -3,6 +3,7 @@ from posterior import Posterior from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify +from ...core.parameterization.variational import VariationalPosterior import numpy as np from ...util.misc import param_to_array log_2_pi = np.log(2*np.pi) @@ -23,13 +24,13 @@ class VarDTC(object): from ...util.caching import Cacher self.get_trYYT = Cacher(self._get_trYYT, 1) self.get_YYTfactor = Cacher(self._get_YYTfactor, 1) - + def _get_trYYT(self, Y): return param_to_array(np.sum(np.square(Y))) def _get_YYTfactor(self, Y): """ - find a matrix L which satisfies LLT = YYT. + find a matrix L which satisfies LLT = YYT. Note that L may have fewer columns than Y. 
""" @@ -38,28 +39,26 @@ class VarDTC(object): return param_to_array(Y) else: return jitchol(tdot(Y)) - + def get_VVTfactor(self, Y, prec): return Y * prec # TODO chache this, and make it effective - - def inference(self, kern, X, X_variance, Z, likelihood, Y): - """Inference for normal sparseGP""" - uncertain_inputs = False - psi0, psi1, psi2 = _compute_psi(kern, X, Z) - return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs) - def inference_latent(self, kern, posterior_variational, Z, likelihood, Y): - """Inference for GPLVM with uncertain inputs""" - uncertain_inputs = True - psi0, psi1, psi2 = _compute_psi_latent(kern, posterior_variational, Z) - return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs) - - def _inference(self, kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs): + def inference(self, kern, X, Z, likelihood, Y): + if isinstance(X, VariationalPosterior): + uncertain_inputs = True + psi0 = kern.psi0(Z, X) + psi1 = kern.psi1(Z, X) + psi2 = kern.psi2(Z, X) + else: + uncertain_inputs = False + psi0 = kern.Kdiag(X) + psi1 = kern.K(X, Z) + psi2 = None #see whether we're using variational uncertain inputs - + _, output_dim = Y.shape - + #see whether we've got a different noise variance for each datum beta = 1./np.squeeze(likelihood.variance) @@ -69,16 +68,16 @@ class VarDTC(object): VVT_factor = beta*Y #VVT_factor = beta*Y trYYT = self.get_trYYT(Y) - + # do the inference: het_noise = beta.size < 1 num_inducing = Z.shape[0] num_data = Y.shape[0] # kernel computations, using BGPLVM notation - Kmm = kern.K(Z) - + Kmm = kern.K(Z) + Lm = jitchol(Kmm) - + # The rather complex computations of A if uncertain_inputs: if het_noise: @@ -124,33 +123,33 @@ class VarDTC(object): dL_dKmm = backsub_both_sides(Lm, delit) # derivatives of L w.r.t. 
psi - dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, - VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, + dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, + VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs) - + # log marginal likelihood - log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, + log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit) - + #put the gradients in the right places - partial_for_likelihood = _compute_partial_for_likelihood(likelihood, - het_noise, uncertain_inputs, LB, - _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, - psi0, psi1, beta, + partial_for_likelihood = _compute_partial_for_likelihood(likelihood, + het_noise, uncertain_inputs, LB, + _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, + psi0, psi1, beta, data_fit, num_data, output_dim, trYYT) - + #likelihood.update_gradients(partial_for_likelihood) if uncertain_inputs: - grad_dict = {'dL_dKmm': dL_dKmm, - 'dL_dpsi0':dL_dpsi0, - 'dL_dpsi1':dL_dpsi1, - 'dL_dpsi2':dL_dpsi2, + grad_dict = {'dL_dKmm': dL_dKmm, + 'dL_dpsi0':dL_dpsi0, + 'dL_dpsi1':dL_dpsi1, + 'dL_dpsi2':dL_dpsi2, 'partial_for_likelihood':partial_for_likelihood} else: - grad_dict = {'dL_dKmm': dL_dKmm, - 'dL_dKdiag':dL_dpsi0, - 'dL_dKnm':dL_dpsi1, + grad_dict = {'dL_dKmm': dL_dKmm, + 'dL_dKdiag':dL_dpsi0, + 'dL_dKnm':dL_dpsi1, 'partial_for_likelihood':partial_for_likelihood} #get sufficient things for posterior prediction @@ -181,7 +180,7 @@ class VarDTCMissingData(object): from ...util.caching import Cacher self._Y = Cacher(self._subarray_computations, 1) pass - + def _subarray_computations(self, Y): inan = np.isnan(Y) has_none = inan.any() @@ -202,19 +201,18 @@ class VarDTCMissingData(object): self._subarray_indices = [[slice(None),slice(None)]] return [Y], [(Y**2).sum()] - def inference(self, kern, X, X_variance, Z, likelihood, Y): - """Inference for normal sparseGP""" - uncertain_inputs = False - psi0, psi1, psi2 = _compute_psi(kern, X, Z) - return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs) + def inference(self, kern, X, Z, likelihood, Y): + if isinstance(X, VariationalPosterior): + uncertain_inputs = True + psi0 = kern.psi0(Z, X) + psi1 = kern.psi1(Z, X) + psi2 = kern.psi2(Z, X) + else: + uncertain_inputs = False + psi0 = kern.Kdiag(X) + psi1 = kern.K(X, Z) + psi2 = None - def inference_latent(self, kern, posterior_variational, Z, likelihood, Y): - """Inference for GPLVM with uncertain inputs""" - uncertain_inputs = True - psi0, psi1, psi2 = _compute_psi_latent(kern, posterior_variational, Z) - return self._inference(kern, psi0, psi1, psi2, Z, likelihood, Y, uncertain_inputs) - - def _inference(self, kern, psi0_all, psi1_all, psi2_all, Z, likelihood, Y, uncertain_inputs): Ys, traces = self._Y(Y) beta_all = 1./likelihood.variance het_noise = beta_all.size != 1 @@ -226,15 +224,15 @@ class VarDTCMissingData(object): dL_dpsi1_all = np.zeros((Y.shape[0], num_inducing)) if uncertain_inputs: dL_dpsi2_all = np.zeros((Y.shape[0], num_inducing, num_inducing)) - + partial_for_likelihood = 0 woodbury_vector = np.zeros((num_inducing, Y.shape[1])) woodbury_inv_all = np.zeros((num_inducing, num_inducing, Y.shape[1])) dL_dKmm = 0 log_marginal = 0 - + Kmm = kern.K(Z) - #factor Kmm + #factor Kmm Lm = jitchol(Kmm) if uncertain_inputs: LmInv = dtrtri(Lm) @@ -242,11 +240,11 @@ class VarDTCMissingData(object): full_VVT_factor = 
VVT_factor_all.shape[1] == Y.shape[1] if not full_VVT_factor: psi1V = np.dot(Y.T*beta_all, psi1_all).T - + for y, trYYT, [v, ind] in itertools.izip(Ys, traces, self._subarray_indices): if het_noise: beta = beta_all[ind] else: beta = beta_all[0] - + VVT_factor = (beta*y) VVT_factor_all[v, ind].flat = VVT_factor.flat output_dim = y.shape[1] @@ -256,7 +254,7 @@ class VarDTCMissingData(object): if uncertain_inputs: psi2 = psi2_all[v, :] else: psi2 = None num_data = psi1.shape[0] - + if uncertain_inputs: if het_noise: psi2_beta = psi2 * (beta.flatten().reshape(num_data, 1, 1)).sum(0) else: psi2_beta = psi2.sum(0) * beta @@ -270,13 +268,13 @@ class VarDTCMissingData(object): # factor B B = np.eye(num_inducing) + A LB = jitchol(B) - + psi1Vf = psi1.T.dot(VVT_factor) tmp, _ = dtrtrs(Lm, psi1Vf, lower=1, trans=0) _LBi_Lmi_psi1Vf, _ = dtrtrs(LB, tmp, lower=1, trans=0) tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1Vf, lower=1, trans=1) Cpsi1Vf, _ = dtrtrs(Lm, tmp, lower=1, trans=1) - + # data fit and derivative of L w.r.t. Kmm delit = tdot(_LBi_Lmi_psi1Vf) data_fit = np.trace(delit) @@ -287,34 +285,34 @@ class VarDTCMissingData(object): dL_dKmm += backsub_both_sides(Lm, delit) # derivatives of L w.r.t. psi - dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, - VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, + dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, + VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs) - + #import ipdb;ipdb.set_trace() dL_dpsi0_all[v] += dL_dpsi0 dL_dpsi1_all[v, :] += dL_dpsi1 if uncertain_inputs: dL_dpsi2_all[v, :] += dL_dpsi2 - + # log marginal likelihood - log_marginal += _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, + log_marginal += _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit) #put the gradients in the right places - partial_for_likelihood += _compute_partial_for_likelihood(likelihood, - het_noise, uncertain_inputs, LB, - _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, - psi0, psi1, beta, + partial_for_likelihood += _compute_partial_for_likelihood(likelihood, + het_noise, uncertain_inputs, LB, + _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, + psi0, psi1, beta, data_fit, num_data, output_dim, trYYT) - + if full_VVT_factor: woodbury_vector[:, ind] = Cpsi1Vf else: print 'foobar' tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0) tmp, _ = dpotrs(LB, tmp, lower=1) woodbury_vector[:, ind] = dtrtrs(Lm, tmp, lower=1, trans=1)[0] - + #import ipdb;ipdb.set_trace() Bi, _ = dpotri(LB, lower=1) symmetrify(Bi) @@ -325,15 +323,15 @@ class VarDTCMissingData(object): # gradients: if uncertain_inputs: - grad_dict = {'dL_dKmm': dL_dKmm, - 'dL_dpsi0':dL_dpsi0_all, - 'dL_dpsi1':dL_dpsi1_all, - 'dL_dpsi2':dL_dpsi2_all, + grad_dict = {'dL_dKmm': dL_dKmm, + 'dL_dpsi0':dL_dpsi0_all, + 'dL_dpsi1':dL_dpsi1_all, + 'dL_dpsi2':dL_dpsi2_all, 'partial_for_likelihood':partial_for_likelihood} else: - grad_dict = {'dL_dKmm': dL_dKmm, - 'dL_dKdiag':dL_dpsi0_all, - 'dL_dKnm':dL_dpsi1_all, + grad_dict = {'dL_dKmm': dL_dKmm, + 'dL_dKdiag':dL_dpsi0_all, + 'dL_dKnm':dL_dpsi1_all, 'partial_for_likelihood':partial_for_likelihood} #get sufficient things for posterior prediction @@ -350,26 +348,13 @@ class VarDTCMissingData(object): #Bi = -dpotri(LB_all, lower=1)[0] #from ...util import diag #diag.add(Bi, 1) - + #woodbury_inv = backsub_both_sides(Lm, Bi) - + post = Posterior(woodbury_inv=woodbury_inv_all, woodbury_vector=woodbury_vector, K=Kmm, 
mean=None, cov=None, K_chol=Lm) return post, log_marginal, grad_dict - -def _compute_psi(kern, X, Z): - psi0 = kern.Kdiag(X) - psi1 = kern.K(X, Z) - psi2 = None - return psi0, psi1, psi2 - -def _compute_psi_latent(kern, posterior_variational, Z): - psi0 = kern.psi0(Z, posterior_variational) - psi1 = kern.psi1(Z, posterior_variational) - psi2 = kern.psi2(Z, posterior_variational) - return psi0, psi1, psi2 - def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs): dL_dpsi0 = -0.5 * output_dim * (beta * np.ones([num_data, 1])).flatten() dL_dpsi1 = np.dot(VVT_factor, Cpsi1Vf.T) diff --git a/GPy/inference/optimization/conjugate_gradient_descent.py b/GPy/inference/optimization/conjugate_gradient_descent.py index 9eabf5dd..8f90b536 100644 --- a/GPy/inference/optimization/conjugate_gradient_descent.py +++ b/GPy/inference/optimization/conjugate_gradient_descent.py @@ -3,7 +3,7 @@ Created on 24 Apr 2013 @author: maxz ''' -from GPy.inference.gradient_descent_update_rules import FletcherReeves, \ +from gradient_descent_update_rules import FletcherReeves, \ PolakRibiere from Queue import Empty from multiprocessing import Value diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py index a1f57619..5dd6e554 100644 --- a/GPy/kern/__init__.py +++ b/GPy/kern/__init__.py @@ -3,24 +3,10 @@ from _src.rbf import RBF from _src.linear import Linear from _src.static import Bias, White from _src.brownian import Brownian +from _src.sympykern import Sympykern from _src.stationary import Exponential, Matern32, Matern52, ExpQuad, RatQuad, Cosine from _src.mlp import MLP from _src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52 -from _src.independent_outputs import IndependentOutputs -#import coregionalize -#import eq_ode1 -#import finite_dimensional -#import fixed -#import gibbs -#import hetero -#import hierarchical -#import ODE_1 -#import periodic_exponential -#import periodic_Matern32 -#import periodic_Matern52 -#import poly -#import rbfcos -#import rbf -#import rbf_inv -#import spline -#import symmetric +from _src.independent_outputs import IndependentOutputs, Hierarchical +from _src.coregionalize import Coregionalize +from _src.ssrbf import SSRBF diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index a91a9c9e..1466800c 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -45,9 +45,6 @@ class Add(Kern): def update_gradients_full(self, dL_dK, X): [p.update_gradients_full(dL_dK, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] - def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z): - [p.update_gradients_sparse(dL_dKmm, dL_dKnm, dL_dKdiag, X[:,i_s], Z[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] - def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. 
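With `update_gradients_sparse` removed from the kernel API (see the `GPy/kern/_src/kern.py` hunks later in this patch), the three-block gradient accumulation now lives in `SparseGP.parameters_changed`. A minimal sketch of that pattern, assuming only the `update_gradients_diag`/`update_gradients_full` and private `_collect_gradient`/`_set_gradient` methods used throughout this patch (`accumulate_sparse_gradients` is a hypothetical helper name):

    import numpy as np

    def accumulate_sparse_gradients(kern, grad_dict, X, Z):
        # Each update_gradients_* call overwrites the kernel's parameter
        # gradients, so collect every block's contribution into `target`
        # and write the sum back once at the end.
        target = np.zeros(kern.size)
        kern.update_gradients_diag(grad_dict['dL_dKdiag'], X)      # diag(Knn) block
        kern._collect_gradient(target)
        kern.update_gradients_full(grad_dict['dL_dKnm'], X, Z)     # cross-covariance block
        kern._collect_gradient(target)
        kern.update_gradients_full(grad_dict['dL_dKmm'], Z, None)  # inducing block
        kern._collect_gradient(target)
        kern._set_gradient(target)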
@@ -70,13 +67,13 @@ class Add(Kern):
         return sum([p.Kdiag(X[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)])
 
-    def psi0(self, Z, mu, S):
+    def psi0(self, Z, variational_posterior):
+        mu, S = variational_posterior.mean, variational_posterior.variance
         return np.sum([p.psi0(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)],0)
 
-    def psi1(self, Z, mu, S):
+    def psi1(self, Z, variational_posterior):
+        mu, S = variational_posterior.mean, variational_posterior.variance
         return np.sum([p.psi1(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
 
-    def psi2(self, Z, mu, S):
+    def psi2(self, Z, variational_posterior):
+        mu, S = variational_posterior.mean, variational_posterior.variance
         psi2 = np.sum([p.psi2(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
 
         # compute the "cross" terms
@@ -104,7 +101,7 @@ class Add(Kern):
             raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return psi2
 
-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        mu, S = variational_posterior.mean, variational_posterior.variance
         from white import White
         from rbf import RBF
         #from rbf_inv import RBFInv
@@ -127,10 +124,10 @@ class Add(Kern):
 
                 eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
 
-            p1.update_gradients_variational(dL_dKmm, dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
+            p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
 
-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        mu, S = variational_posterior.mean, variational_posterior.variance
         from white import White
         from rbf import RBF
         #from rbf_inv import rbfinv
@@ -154,10 +151,10 @@ class Add(Kern):
 
                 eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
 
-            target += p1.gradients_z_variational(dL_dKmm, dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
+            target += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
         return target
 
-    def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        from white import white
-        from rbf import rbf
+    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        mu, S = variational_posterior.mean, variational_posterior.variance
+        from white import White
+        from rbf import RBF
         #from rbf_inv import rbfinv
@@ -182,7 +179,7 @@ class Add(Kern):
 
-                eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(z[:,is2], mu[:,is2], s[:,is2]) * 2.
+                eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
 
-            a, b = p1.gradients_muS_variational(dL_dkmm, dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], s[:,is1], z[:,is1])
+            a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
             target_mu += a
             target_S += b
         return target_mu, target_S
diff --git a/GPy/kern/_src/constructors.py b/GPy/kern/_src/constructors.py
deleted file mode 100644
index 1cbbfd76..00000000
--- a/GPy/kern/_src/constructors.py
+++ /dev/null
@@ -1,568 +0,0 @@
-# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt) - -import numpy as np -from kern import kern -import parts - -def rbf_inv(input_dim,variance=1., inv_lengthscale=None,ARD=False,name='inverse rbf'): - """ - Construct an RBF kernel - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param ARD: Auto Relevance Determination (one lengthscale per dimension) - :type ARD: Boolean - - """ - part = parts.rbf_inv.RBFInv(input_dim,variance,inv_lengthscale,ARD,name=name) - return kern(input_dim, [part]) - -def rbf(input_dim,variance=1., lengthscale=None,ARD=False, name='rbf'): - """ - Construct an RBF kernel - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param ARD: Auto Relevance Determination (one lengthscale per dimension) - :type ARD: Boolean - - """ - part = parts.rbf.RBF(input_dim,variance,lengthscale,ARD, name=name) - return kern(input_dim, [part]) - -def linear(input_dim,variances=None,ARD=False,name='linear'): - """ - Construct a linear kernel. - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variances: - :type variances: np.ndarray - :param ARD: Auto Relevance Determination (one lengthscale per dimension) - :type ARD: Boolean - - """ - part = parts.linear.Linear(input_dim,variances,ARD,name=name) - return kern(input_dim, [part]) - -def mlp(input_dim,variance=1., weight_variance=None,bias_variance=100.,ARD=False): - """ - Construct an MLP kernel - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param weight_scale: the lengthscale of the kernel - :type weight_scale: vector of weight variances for input weights in neural network (length 1 if kernel is isotropic) - :param bias_variance: the variance of the biases in the neural network. - :type bias_variance: float - :param ARD: Auto Relevance Determination (allows for ARD version of covariance) - :type ARD: Boolean - - """ - part = parts.mlp.MLP(input_dim,variance,weight_variance,bias_variance,ARD) - return kern(input_dim, [part]) - -def gibbs(input_dim,variance=1., mapping=None): - """ - - Gibbs and MacKay non-stationary covariance function. - - .. math:: - - r = \\sqrt{((x_i - x_j)'*(x_i - x_j))} - - k(x_i, x_j) = \\sigma^2*Z*exp(-r^2/(l(x)*l(x) + l(x')*l(x'))) - - Z = \\sqrt{2*l(x)*l(x')/(l(x)*l(x) + l(x')*l(x')} - - Where :math:`l(x)` is a function giving the length scale as a function of space. - - This is the non stationary kernel proposed by Mark Gibbs in his 1997 - thesis. It is similar to an RBF but has a length scale that varies - with input location. This leads to an additional term in front of - the kernel. - - The parameters are :math:`\\sigma^2`, the process variance, and the parameters of l(x) which is a function that can be specified by the user, by default an multi-layer peceptron is used is used. - - :param input_dim: the number of input dimensions - :type input_dim: int - :param variance: the variance :math:`\\sigma^2` - :type variance: float - :param mapping: the mapping that gives the lengthscale across the input space. 
- :type mapping: GPy.core.Mapping - :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one weight variance parameter :math:`\\sigma^2_w`), otherwise there is one weight variance parameter per dimension. - :type ARD: Boolean - :rtype: Kernpart object - - """ - part = parts.gibbs.Gibbs(input_dim,variance,mapping) - return kern(input_dim, [part]) - -def hetero(input_dim, mapping=None, transform=None): - """ - """ - part = parts.hetero.Hetero(input_dim,mapping,transform) - return kern(input_dim, [part]) - -def poly(input_dim,variance=1., weight_variance=None,bias_variance=1.,degree=2, ARD=False): - """ - Construct a polynomial kernel - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param weight_scale: the lengthscale of the kernel - :type weight_scale: vector of weight variances for input weights. - :param bias_variance: the variance of the biases. - :type bias_variance: float - :param degree: the degree of the polynomial - :type degree: int - :param ARD: Auto Relevance Determination (allows for ARD version of covariance) - :type ARD: Boolean - - """ - part = parts.poly.POLY(input_dim,variance,weight_variance,bias_variance,degree,ARD) - return kern(input_dim, [part]) - -def white(input_dim,variance=1.,name='white'): - """ - Construct a white kernel. - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - - """ - part = parts.white.White(input_dim,variance,name=name) - return kern(input_dim, [part]) - -def eq_ode1(output_dim, W=None, rank=1, kappa=None, length_scale=1., decay=None, delay=None): - """Covariance function for first order differential equation driven by an exponentiated quadratic covariance. - - This outputs of this kernel have the form - .. math:: - \frac{\text{d}y_j}{\text{d}t} = \sum_{i=1}^R w_{j,i} f_i(t-\delta_j) +\sqrt{\kappa_j}g_j(t) - d_jy_j(t) - - where :math:`R` is the rank of the system, :math:`w_{j,i}` is the sensitivity of the :math:`j`th output to the :math:`i`th latent function, :math:`d_j` is the decay rate of the :math:`j`th output and :math:`f_i(t)` and :math:`g_i(t)` are independent latent Gaussian processes goverened by an exponentiated quadratic covariance. - - :param output_dim: number of outputs driven by latent function. - :type output_dim: int - :param W: sensitivities of each output to the latent driving function. - :type W: ndarray (output_dim x rank). - :param rank: If rank is greater than 1 then there are assumed to be a total of rank latent forces independently driving the system, each with identical covariance. - :type rank: int - :param decay: decay rates for the first order system. - :type decay: array of length output_dim. - :param delay: delay between latent force and output response. - :type delay: array of length output_dim. - :param kappa: diagonal term that allows each latent output to have an independent component to the response. - :type kappa: array of length output_dim. - - .. Note: see first order differential equation examples in GPy.examples.regression for some usage. 
- """ - part = parts.eq_ode1.Eq_ode1(output_dim, W, rank, kappa, length_scale, decay, delay) - return kern(2, [part]) - - -def exponential(input_dim,variance=1., lengthscale=None, ARD=False): - """ - Construct an exponential kernel - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param ARD: Auto Relevance Determination (one lengthscale per dimension) - :type ARD: Boolean - - """ - part = parts.exponential.Exponential(input_dim,variance, lengthscale, ARD) - return kern(input_dim, [part]) - -def Matern32(input_dim,variance=1., lengthscale=None, ARD=False): - """ - Construct a Matern 3/2 kernel. - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param ARD: Auto Relevance Determination (one lengthscale per dimension) - :type ARD: Boolean - - """ - part = parts.Matern32.Matern32(input_dim,variance, lengthscale, ARD) - return kern(input_dim, [part]) - -def Matern52(input_dim, variance=1., lengthscale=None, ARD=False): - """ - Construct a Matern 5/2 kernel. - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param ARD: Auto Relevance Determination (one lengthscale per dimension) - :type ARD: Boolean - - """ - part = parts.Matern52.Matern52(input_dim, variance, lengthscale, ARD) - return kern(input_dim, [part]) - -def bias(input_dim, variance=1., name='bias'): - """ - Construct a bias kernel. - - :param input_dim: dimensionality of the kernel, obligatory - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - - """ - part = parts.bias.Bias(input_dim, variance, name=name) - return kern(input_dim, [part]) - -def finite_dimensional(input_dim, F, G, variances=1., weights=None): - """ - Construct a finite dimensional kernel. - - :param input_dim: the number of input dimensions - :type input_dim: int - :param F: np.array of functions with shape (n,) - the n basis functions - :type F: np.array - :param G: np.array with shape (n,n) - the Gram matrix associated to F - :type G: np.array - :param variances: np.ndarray with shape (n,) - :type: np.ndarray - """ - part = parts.finite_dimensional.FiniteDimensional(input_dim, F, G, variances, weights) - return kern(input_dim, [part]) - -def spline(input_dim, variance=1.): - """ - Construct a spline kernel. - - :param input_dim: Dimensionality of the kernel - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - - """ - part = parts.spline.Spline(input_dim, variance) - return kern(input_dim, [part]) - -def Brownian(input_dim, variance=1.): - """ - Construct a Brownian motion kernel. 
- - :param input_dim: Dimensionality of the kernel - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - - """ - part = parts.Brownian.Brownian(input_dim, variance) - return kern(input_dim, [part]) - -try: - import sympy as sp - sympy_available = True -except ImportError: - sympy_available = False - -if sympy_available: - from parts.sympykern import spkern - from sympy.parsing.sympy_parser import parse_expr - - def rbf_sympy(input_dim, ARD=False, variance=1., lengthscale=1.): - """ - Radial Basis Function covariance. - """ - X = sp.symbols('x_:' + str(input_dim)) - Z = sp.symbols('z_:' + str(input_dim)) - variance = sp.var('variance',positive=True) - if ARD: - lengthscales = sp.symbols('lengthscale_:' + str(input_dim)) - dist_string = ' + '.join(['(x_%i-z_%i)**2/lengthscale%i**2' % (i, i, i) for i in range(input_dim)]) - dist = parse_expr(dist_string) - f = variance*sp.exp(-dist/2.) - else: - lengthscale = sp.var('lengthscale',positive=True) - dist_string = ' + '.join(['(x_%i-z_%i)**2' % (i, i) for i in range(input_dim)]) - dist = parse_expr(dist_string) - f = variance*sp.exp(-dist/(2*lengthscale**2)) - return kern(input_dim, [spkern(input_dim, f, name='rbf_sympy')]) - - def eq_sympy(input_dim, output_dim, ARD=False, variance=1., lengthscale=1.): - """ - Exponentiated quadratic with multiple outputs. - """ - real_input_dim = input_dim - if output_dim>1: - real_input_dim -= 1 - X = sp.symbols('x_:' + str(real_input_dim)) - Z = sp.symbols('z_:' + str(real_input_dim)) - scale = sp.var('scale_i scale_j',positive=True) - if ARD: - lengthscales = [sp.var('lengthscale%i_i lengthscale%i_j' % i, positive=True) for i in range(real_input_dim)] - shared_lengthscales = [sp.var('shared_lengthscale%i' % i, positive=True) for i in range(real_input_dim)] - dist_string = ' + '.join(['(x_%i-z_%i)**2/(shared_lengthscale%i**2 + lengthscale%i_i*lengthscale%i_j)' % (i, i, i) for i in range(real_input_dim)]) - dist = parse_expr(dist_string) - f = variance*sp.exp(-dist/2.) - else: - lengthscale = sp.var('lengthscale_i lengthscale_j',positive=True) - shared_lengthscale = sp.var('shared_lengthscale',positive=True) - dist_string = ' + '.join(['(x_%i-z_%i)**2' % (i, i) for i in range(real_input_dim)]) - dist = parse_expr(dist_string) - f = scale_i*scale_j*sp.exp(-dist/(2*(shared_lengthscale**2 + lengthscale_i*lengthscale_j))) - return kern(input_dim, [spkern(input_dim, f, output_dim=output_dim, name='eq_sympy')]) - - def sympykern(input_dim, k=None, output_dim=1, name=None, param=None): - """ - A base kernel object, where all the hard work in done by sympy. - - :param k: the covariance function - :type k: a positive definite sympy function of x1, z1, x2, z2... - - To construct a new sympy kernel, you'll need to define: - - a kernel function using a sympy object. Ensure that the kernel is of the form k(x,z). - - that's it! we'll extract the variables from the function k. - - Note: - - to handle multiple inputs, call them x1, z1, etc - - to handle multpile correlated outputs, you'll need to define each covariance function and 'cross' variance function. 
TODO - """ - return kern(input_dim, [spkern(input_dim, k=k, output_dim=output_dim, name=name, param=param)]) -del sympy_available - -def periodic_exponential(input_dim=1, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): - """ - Construct an periodic exponential kernel - - :param input_dim: dimensionality, only defined for input_dim=1 - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param period: the period - :type period: float - :param n_freq: the number of frequencies considered for the periodic subspace - :type n_freq: int - - """ - part = parts.periodic_exponential.PeriodicExponential(input_dim, variance, lengthscale, period, n_freq, lower, upper) - return kern(input_dim, [part]) - -def periodic_Matern32(input_dim, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): - """ - Construct a periodic Matern 3/2 kernel. - - :param input_dim: dimensionality, only defined for input_dim=1 - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param period: the period - :type period: float - :param n_freq: the number of frequencies considered for the periodic subspace - :type n_freq: int - - """ - part = parts.periodic_Matern32.PeriodicMatern32(input_dim, variance, lengthscale, period, n_freq, lower, upper) - return kern(input_dim, [part]) - -def periodic_Matern52(input_dim, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): - """ - Construct a periodic Matern 5/2 kernel. - - :param input_dim: dimensionality, only defined for input_dim=1 - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the lengthscale of the kernel - :type lengthscale: float - :param period: the period - :type period: float - :param n_freq: the number of frequencies considered for the periodic subspace - :type n_freq: int - - """ - part = parts.periodic_Matern52.PeriodicMatern52(input_dim, variance, lengthscale, period, n_freq, lower, upper) - return kern(input_dim, [part]) - -def prod(k1,k2,tensor=False): - """ - Construct a product kernel over input_dim from two kernels over input_dim - - :param k1, k2: the kernels to multiply - :type k1, k2: kernpart - :param tensor: The kernels are either multiply as functions defined on the same input space (default) or on the product of the input spaces - :type tensor: Boolean - :rtype: kernel object - - """ - part = parts.prod.Prod(k1, k2, tensor) - return kern(part.input_dim, [part]) - -def symmetric(k): - """ - Construct a symmetric kernel from an existing kernel - """ - k_ = k.copy() - k_.parts = [symmetric.Symmetric(p) for p in k.parts] - return k_ - -def coregionalize(output_dim,rank=1, W=None, kappa=None): - """ - Coregionlization matrix B, of the form: - - .. math:: - \mathbf{B} = \mathbf{W}\mathbf{W}^\top + kappa \mathbf{I} - - An intrinsic/linear coregionalization kernel of the form: - - .. math:: - k_2(x, y)=\mathbf{B} k(x, y) - - it is obtainded as the tensor product between a kernel k(x,y) and B. 
- - :param output_dim: the number of outputs to corregionalize - :type output_dim: int - :param rank: number of columns of the W matrix (this parameter is ignored if parameter W is not None) - :type rank: int - :param W: a low rank matrix that determines the correlations between the different outputs, together with kappa it forms the coregionalization matrix B - :type W: numpy array of dimensionality (num_outpus, rank) - :param kappa: a vector which allows the outputs to behave independently - :type kappa: numpy array of dimensionality (output_dim,) - :rtype: kernel object - - """ - p = parts.coregionalize.Coregionalize(output_dim,rank,W,kappa) - return kern(1,[p]) - - -def rational_quadratic(input_dim, variance=1., lengthscale=1., power=1.): - """ - Construct rational quadratic kernel. - - :param input_dim: the number of input dimensions - :type input_dim: int (input_dim=1 is the only value currently supported) - :param variance: the variance :math:`\sigma^2` - :type variance: float - :param lengthscale: the lengthscale :math:`\ell` - :type lengthscale: float - :rtype: kern object - - """ - part = parts.rational_quadratic.RationalQuadratic(input_dim, variance, lengthscale, power) - return kern(input_dim, [part]) - -def fixed(input_dim, K, variance=1.): - """ - Construct a Fixed effect kernel. - - :param input_dim: the number of input dimensions - :type input_dim: int (input_dim=1 is the only value currently supported) - :param K: the variance :math:`\sigma^2` - :type K: np.array - :param variance: kernel variance - :type variance: float - :rtype: kern object - """ - part = parts.fixed.Fixed(input_dim, K, variance) - return kern(input_dim, [part]) - -def rbfcos(input_dim, variance=1., frequencies=None, bandwidths=None, ARD=False): - """ - construct a rbfcos kernel - """ - part = parts.rbfcos.RBFCos(input_dim, variance, frequencies, bandwidths, ARD) - return kern(input_dim, [part]) - -def independent_outputs(k): - """ - Construct a kernel with independent outputs from an existing kernel - """ - for sl in k.input_slices: - assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! (TODO)" - _parts = [parts.independent_outputs.IndependentOutputs(p) for p in k.parts] - return kern(k.input_dim+1,_parts) - -def hierarchical(k): - """ - TODO This can't be right! Construct a kernel with independent outputs from an existing kernel - """ - # for sl in k.input_slices: - # assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! 
(TODO)" - _parts = [parts.hierarchical.Hierarchical(k.parts)] - return kern(k.input_dim+len(k.parts),_parts) - -def build_lcm(input_dim, output_dim, kernel_list = [], rank=1,W=None,kappa=None): - """ - Builds a kernel of a linear coregionalization model - - :input_dim: Input dimensionality - :output_dim: Number of outputs - :kernel_list: List of coregionalized kernels, each element in the list will be multiplied by a different corregionalization matrix - :type kernel_list: list of GPy kernels - :param rank: number tuples of the corregionalization parameters 'coregion_W' - :type rank: integer - - ..note the kernels dimensionality is overwritten to fit input_dim - - """ - - for k in kernel_list: - if k.input_dim <> input_dim: - k.input_dim = input_dim - warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.") - - k_coreg = coregionalize(output_dim,rank,W,kappa) - kernel = kernel_list[0]**k_coreg.copy() - - for k in kernel_list[1:]: - k_coreg = coregionalize(output_dim,rank,W,kappa) - kernel += k**k_coreg.copy() - - return kernel - -def ODE_1(input_dim=1, varianceU=1., varianceY=1., lengthscaleU=None, lengthscaleY=None): - """ - kernel resultiong from a first order ODE with OU driving GP - - :param input_dim: the number of input dimension, has to be equal to one - :type input_dim: int - :param varianceU: variance of the driving GP - :type varianceU: float - :param lengthscaleU: lengthscale of the driving GP - :type lengthscaleU: float - :param varianceY: 'variance' of the transfer function - :type varianceY: float - :param lengthscaleY: 'lengthscale' of the transfer function - :type lengthscaleY: float - :rtype: kernel object - - """ - part = parts.ODE_1.ODE_1(input_dim, varianceU, varianceY, lengthscaleU, lengthscaleY) - return kern(input_dim, [part]) diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py index 74cd2a1d..cafdd5ee 100644 --- a/GPy/kern/_src/coregionalize.py +++ b/GPy/kern/_src/coregionalize.py @@ -129,7 +129,7 @@ class Coregionalize(Kern): def update_gradients_diag(self, dL_dKdiag, X): index = np.asarray(X, dtype=np.int).flatten() - dL_dKdiag_small = np.array([dL_dKdiag[index==i] for i in xrange(output_dim)]) + dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in xrange(self.output_dim)]) self.W.gradient = 2.*self.W*dL_dKdiag_small[:, None] self.kappa.gradient = dL_dKdiag_small diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 6d3943ae..252a7bc3 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -102,7 +102,7 @@ class IndependentOutputs(Kern): [[collate_grads(dL_dKdiag[s], X[s,:]) for s in slices_i] for slices_i in slices] self.kern._set_gradient(target) -def Hierarchical(kern_f, kern_g, name='hierarchy'): +class Hierarchical(Kern): """ A kernel which can reopresent a simple hierarchical model. @@ -110,10 +110,51 @@ def Hierarchical(kern_f, kern_g, name='hierarchy'): series across irregularly sampled replicates and clusters" http://www.biomedcentral.com/1471-2105/14/252 - The index of the functions is given by the last column in the input X - the rest of the columns of X are passed to the underlying kernel for computation (in blocks). + The index of the functions is given by additional columns in the input X. 
""" - assert kern_f.input_dim == kern_g.input_dim - return kern_f + IndependentOutputs(kern_g) + def __init__(self, kerns, name='hierarchy'): + assert all([k.input_dim==kerns[0].input_dim for k in kerns]) + super(Hierarchical, self).__init__(kerns[0].input_dim + len(kerns) - 1, name) + self.kerns = kerns + self.add_parameters(self.kerns) + + def K(self,X ,X2=None): + X, slices = X[:,:-self.levels], [index_to_slices(X[:,i]) for i in range(self.kerns[0].input_dim, self.input_dim)] + K = self.kerns[0].K(X, X2) + if X2 is None: + [[[np.copyto(K[s,s], k.K(X[s], None)) for s in slices_i] for slices_i in slices_k] for k, slices_k in zip(self.kerns[1:], slices)] + else: + X2, slices2 = X2[:,:-1],index_to_slices(X2[:,-1]) + [[[[np.copyto(K[s, s2], self.kern.K(X[s],X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices_k,slices_k2)] for k, slices_k, slices_k2 in zip(self.kerns[1:], slices, slices2)] + return target + + def Kdiag(self,X): + X, slices = X[:,:-self.levels], [index_to_slices(X[:,i]) for i in range(self.kerns[0].input_dim, self.input_dim)] + K = self.kerns[0].K(X, X2) + [[[np.copyto(target[s], self.kern.Kdiag(X[s])) for s in slices_i] for slices_i in slices_k] for k, slices_k in zip(self.kerns[1:], slices)] + return target + + def update_gradients_full(self,dL_dK,X,X2=None): + X,slices = X[:,:-1],index_to_slices(X[:,-1]) + if X2 is None: + self.kerns[0].update_gradients_full(dL_dK, X, None) + for k, slices_k in zip(self.kerns[1:], slices): + target = np.zeros(k.size) + def collate_grads(dL, X, X2): + k.update_gradients_full(dL,X,X2) + k._collect_gradient(target) + [[k.update_gradients_full(dL_dK[s,s], X[s], None) for s in slices_i] for slices_i in slices_k] + k._set_gradient(target) + else: + X2, slices2 = X2[:,:-1], index_to_slices(X2[:,-1]) + self.kerns[0].update_gradients_full(dL_dK, X, None) + for k, slices_k in zip(self.kerns[1:], slices): + target = np.zeros(k.size) + def collate_grads(dL, X, X2): + k.update_gradients_full(dL,X,X2) + k._collect_gradient(target) + [[[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] + k._set_gradient(target) + diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 9f7dcb0a..d7d8f9ca 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -26,41 +26,48 @@ class Kern(Parameterized): raise NotImplementedError def Kdiag(self, Xa): raise NotImplementedError - def psi0(self,Z,posterior_variational): + def psi0(self, Z, variational_posterior): raise NotImplementedError - def psi1(self,Z,posterior_variational): + def psi1(self, Z, variational_posterior): raise NotImplementedError - def psi2(self,Z,posterior_variational): + def psi2(self, Z, variational_posterior): raise NotImplementedError def gradients_X(self, dL_dK, X, X2): raise NotImplementedError def gradients_X_diag(self, dL_dK, X): raise NotImplementedError - def update_gradients_full(self, dL_dK, X): + + def update_gradients_full(self, dL_dK, X, X2): """Set the gradients of all parameters when doing full (N) inference.""" raise NotImplementedError - def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z): - target = np.zeros(self.size) - self.update_gradients_diag(dL_dKdiag, X) - self._collect_gradient(target) - self.update_gradients_full(dL_dKnm, X, Z) - self._collect_gradient(target) - self.update_gradients_full(dL_dKmm, Z, None) - self._collect_gradient(target) - self._set_gradient(target) - def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, 
dL_dpsi2, Z, posterior_variational): - """Set the gradients of all parameters when doing variational (M) inference with uncertain inputs.""" + def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + """ + Set the gradients of all parameters when doing inference with + uncertain inputs, using expectations of the kernel. + + The esential maths is + + dL_d{theta_i} = dL_dpsi0 * dpsi0_d{theta_i} + + dL_dpsi1 * dpsi1_d{theta_i} + + dL_dpsi2 * dpsi2_d{theta_i} + """ raise NotImplementedError - def gradients_Z_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z): - grad = self.gradients_X(dL_dKmm, Z) - grad += self.gradients_X(dL_dKnm.T, Z, X) - return grad - def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): + + def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + """ + Returns the derivative of the objective wrt Z, using the chain rule + through the expectation variables. + """ raise NotImplementedError - def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): + + def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + """ + Compute the gradients wrt the parameters of the variational + distruibution q(X), chain-ruling via the expectations of the kernel + """ raise NotImplementedError - + def plot_ARD(self, *args, **kw): """ See :class:`~GPy.plotting.matplot_dep.kernel_plots` @@ -69,13 +76,13 @@ class Kern(Parameterized): assert "matplotlib" in sys.modules, "matplotlib package has not been imported." from ...plotting.matplot_dep import kernel_plots return kernel_plots.plot_ARD(self,*args,**kw) - + def input_sensitivity(self): """ Returns the sensitivity for each dimension of this kernel. """ return np.zeros(self.input_dim) - + def __add__(self, other): """ Overloading of the '+' operator. for more control, see self.add """ return self.add(other) @@ -114,7 +121,8 @@ class Kern(Parameterized): def prod(self, other, tensor=False): """ - Multiply two kernels (either on the same space, or on the tensor product of the input space). + Multiply two kernels (either on the same space, or on the tensor + product of the input space). :param other: the other kernel to be added :type other: GPy.kern @@ -125,209 +133,3 @@ class Kern(Parameterized): assert isinstance(other, Kern), "only kernels can be added to kernels..." from prod import Prod return Prod(self, other, tensor) - - -from GPy.core.model import Model - -class Kern_check_model(Model): - """This is a dummy model class used as a base class for checking that the gradients of a given kernel are implemented correctly. 
It enables checkgradient() to be called independently on a kernel.""" - def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): - from GPy.kern import RBF - Model.__init__(self, 'kernel_test_model') - num_samples = 20 - num_samples2 = 10 - if kernel==None: - kernel = RBF(1) - if X==None: - X = np.random.randn(num_samples, kernel.input_dim) - if dL_dK==None: - if X2==None: - dL_dK = np.ones((X.shape[0], X.shape[0])) - else: - dL_dK = np.ones((X.shape[0], X2.shape[0])) - - self.kernel=kernel - self.add_parameter(kernel) - self.X = X - self.X2 = X2 - self.dL_dK = dL_dK - - def is_positive_definite(self): - v = np.linalg.eig(self.kernel.K(self.X))[0] - if any(v<-10*sys.float_info.epsilon): - return False - else: - return True - - def log_likelihood(self): - return (self.dL_dK*self.kernel.K(self.X, self.X2)).sum() - - def _log_likelihood_gradients(self): - raise NotImplementedError, "This needs to be implemented to use the kern_check_model class." - -class Kern_check_dK_dtheta(Kern_check_model): - """This class allows gradient checks for the gradient of a kernel with respect to parameters. """ - def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): - Kern_check_model.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=X2) - - def _log_likelihood_gradients(self): - return self.kernel._param_grad_helper(self.dL_dK, self.X, self.X2) - - - - - -class Kern_check_dKdiag_dtheta(Kern_check_model): - """This class allows gradient checks of the gradient of the diagonal of a kernel with respect to the parameters.""" - def __init__(self, kernel=None, dL_dK=None, X=None): - Kern_check_model.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=None) - if dL_dK==None: - self.dL_dK = np.ones((self.X.shape[0])) - def parameters_changed(self): - self.kernel.update_gradients_full(self.dL_dK, self.X) - - def log_likelihood(self): - return (self.dL_dK*self.kernel.Kdiag(self.X)).sum() - - def _log_likelihood_gradients(self): - return self.kernel.dKdiag_dtheta(self.dL_dK, self.X) - -class Kern_check_dK_dX(Kern_check_model): - """This class allows gradient checks for the gradient of a kernel with respect to X. """ - def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): - Kern_check_model.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=X2) - self.remove_parameter(kernel) - self.X = Param('X', self.X) - self.add_parameter(self.X) - def _log_likelihood_gradients(self): - return self.kernel.gradients_X(self.dL_dK, self.X, self.X2).flatten() - -class Kern_check_dKdiag_dX(Kern_check_dK_dX): - """This class allows gradient checks for the gradient of a kernel diagonal with respect to X. """ - def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): - Kern_check_dK_dX.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=None) - if dL_dK==None: - self.dL_dK = np.ones((self.X.shape[0])) - - def log_likelihood(self): - return (self.dL_dK*self.kernel.Kdiag(self.X)).sum() - - def _log_likelihood_gradients(self): - return self.kernel.dKdiag_dX(self.dL_dK, self.X).flatten() - -def kern_test(kern, X=None, X2=None, output_ind=None, verbose=False): - """ - This function runs on kernels to check the correctness of their - implementation. It checks that the covariance function is positive definite - for a randomly generated data set. - - :param kern: the kernel to be tested. - :type kern: GPy.kern.Kernpart - :param X: X input values to test the covariance function. - :type X: ndarray - :param X2: X2 input values to test the covariance function. 
- :type X2: ndarray - - """ - pass_checks = True - if X==None: - X = np.random.randn(10, kern.input_dim) - if output_ind is not None: - X[:, output_ind] = np.random.randint(kern.output_dim, X.shape[0]) - if X2==None: - X2 = np.random.randn(20, kern.input_dim) - if output_ind is not None: - X2[:, output_ind] = np.random.randint(kern.output_dim, X2.shape[0]) - - if verbose: - print("Checking covariance function is positive definite.") - result = Kern_check_model(kern, X=X).is_positive_definite() - if result and verbose: - print("Check passed.") - if not result: - print("Positive definite check failed for " + kern.name + " covariance function.") - pass_checks = False - return False - - if verbose: - print("Checking gradients of K(X, X) wrt theta.") - result = Kern_check_dK_dtheta(kern, X=X, X2=None).checkgrad(verbose=verbose) - if result and verbose: - print("Check passed.") - if not result: - print("Gradient of K(X, X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:") - Kern_check_dK_dtheta(kern, X=X, X2=None).checkgrad(verbose=True) - pass_checks = False - return False - - if verbose: - print("Checking gradients of K(X, X2) wrt theta.") - result = Kern_check_dK_dtheta(kern, X=X, X2=X2).checkgrad(verbose=verbose) - if result and verbose: - print("Check passed.") - if not result: - print("Gradient of K(X, X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:") - Kern_check_dK_dtheta(kern, X=X, X2=X2).checkgrad(verbose=True) - pass_checks = False - return False - - if verbose: - print("Checking gradients of Kdiag(X) wrt theta.") - result = Kern_check_dKdiag_dtheta(kern, X=X).checkgrad(verbose=verbose) - if result and verbose: - print("Check passed.") - if not result: - print("Gradient of Kdiag(X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:") - Kern_check_dKdiag_dtheta(kern, X=X).checkgrad(verbose=True) - pass_checks = False - return False - - if verbose: - print("Checking gradients of K(X, X) wrt X.") - try: - result = Kern_check_dK_dX(kern, X=X, X2=None).checkgrad(verbose=verbose) - except NotImplementedError: - result=True - if verbose: - print("gradients_X not implemented for " + kern.name) - if result and verbose: - print("Check passed.") - if not result: - print("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:") - Kern_check_dK_dX(kern, X=X, X2=None).checkgrad(verbose=True) - pass_checks = False - return False - - if verbose: - print("Checking gradients of K(X, X2) wrt X.") - try: - result = Kern_check_dK_dX(kern, X=X, X2=X2).checkgrad(verbose=verbose) - except NotImplementedError: - result=True - if verbose: - print("gradients_X not implemented for " + kern.name) - if result and verbose: - print("Check passed.") - if not result: - print("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:") - Kern_check_dK_dX(kern, X=X, X2=X2).checkgrad(verbose=True) - pass_checks = False - return False - - if verbose: - print("Checking gradients of Kdiag(X) wrt X.") - try: - result = Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=verbose) - except NotImplementedError: - result=True - if verbose: - print("gradients_X not implemented for " + kern.name) - if result and verbose: - print("Check passed.") - if not result: - print("Gradient of Kdiag(X) wrt X failed for " + kern.name + " covariance function. 
Gradient values as follows:") - Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=True) - pass_checks = False - return False - - return pass_checks diff --git a/GPy/kern/_src/linear.py b/GPy/kern/_src/linear.py index 2c4e9fa9..44f71d85 100644 --- a/GPy/kern/_src/linear.py +++ b/GPy/kern/_src/linear.py @@ -22,22 +22,25 @@ class Linear(Kern): :param input_dim: the number of input dimensions :type input_dim: int :param variances: the vector of variances :math:`\sigma^2_i` - :type variances: array or list of the appropriate size (or float if there is only one variance parameter) - :param ARD: Auto Relevance Determination. If equal to "False", the kernel has only one variance parameter \sigma^2, otherwise there is one variance parameter per dimension. + :type variances: array or list of the appropriate size (or float if there + is only one variance parameter) + :param ARD: Auto Relevance Determination. If False, the kernel has only one + variance parameter \sigma^2, otherwise there is one variance + parameter per dimension. :type ARD: Boolean :rtype: kernel object + """ def __init__(self, input_dim, variances=None, ARD=False, name='linear'): super(Linear, self).__init__(input_dim, name) self.ARD = ARD - if ARD == False: + if not ARD: if variances is not None: variances = np.asarray(variances) assert variances.size == 1, "Only one variance needed for non-ARD kernel" else: variances = np.ones(1) - self._Xcache, self._X2cache = np.empty(shape=(2,)) else: if variances is not None: variances = np.asarray(variances) @@ -103,55 +106,47 @@ class Linear(Kern): #---------------------------------------# # PSI statistics # - # variational # #---------------------------------------# - def psi0(self, Z, posterior_variational): - return np.sum(self.variances * self._mu2S(posterior_variational), 1) + def psi0(self, Z, variational_posterior): + return np.sum(self.variances * self._mu2S(variational_posterior), 1) - def psi1(self, Z, posterior_variational): - return self.K(posterior_variational.mean, Z) #the variance, it does nothing + def psi1(self, Z, variational_posterior): + return self.K(variational_posterior.mean, Z) #the variance, it does nothing - def psi2(self, Z, posterior_variational): + def psi2(self, Z, variational_posterior): ZA = Z * self.variances - ZAinner = self._ZAinner(posterior_variational, Z) + ZAinner = self._ZAinner(variational_posterior, Z) return np.dot(ZAinner, ZA.T) - def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z): - mu, S = posterior_variational.mean, posterior_variational.variance + def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + #psi1 + self.update_gradients_full(dL_dpsi1, variational_posterior.mean, Z) # psi0: - tmp = dL_dpsi0[:, None] * self._mu2S(posterior_variational) - if self.ARD: grad = tmp.sum(0) - else: grad = np.atleast_1d(tmp.sum()) - #psi1 - self.update_gradients_full(dL_dpsi1, mu, Z) - grad += self.variances.gradient + tmp = dL_dpsi0[:, None] * self._mu2S(variational_posterior) + if self.ARD: self.variances.gradient += tmp.sum(0) + else: self.variances.gradient += tmp.sum() #psi2 - tmp = dL_dpsi2[:, :, :, None] * (self._ZAinner(posterior_variational, Z)[:, :, None, :] * (2. * Z)[None, None, :, :]) - if self.ARD: grad += tmp.sum(0).sum(0).sum(0) - else: grad += tmp.sum() - #from Kmm - self.update_gradients_full(dL_dKmm, Z, None) - self.variances.gradient += grad + tmp = dL_dpsi2[:, :, :, None] * (self._ZAinner(variational_posterior, Z)[:, :, None, :] * (2. 
* Z)[None, None, :, :]) + if self.ARD: self.variances.gradient += tmp.sum(0).sum(0).sum(0) + else: self.variances.gradient += tmp.sum() - def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z): - # Kmm - grad = self.gradients_X(dL_dKmm, Z, None) + def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior): #psi1 - grad += self.gradients_X(dL_dpsi1.T, Z, posterior_variational.mean) + grad = self.gradients_X(dL_dpsi1.T, Z, variational_posterior.mean) #psi2 - self._weave_dpsi2_dZ(dL_dpsi2, Z, posterior_variational, grad) + self._weave_dpsi2_dZ(dL_dpsi2, Z, variational_posterior, grad) return grad - def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z): - grad_mu, grad_S = np.zeros(posterior_variational.mean.shape), np.zeros(posterior_variational.mean.shape) + def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + grad_mu, grad_S = np.zeros(variational_posterior.mean.shape), np.zeros(variational_posterior.mean.shape) # psi0 - grad_mu += dL_dpsi0[:, None] * (2.0 * posterior_variational.mean * self.variances) + grad_mu += dL_dpsi0[:, None] * (2.0 * variational_posterior.mean * self.variances) grad_S += dL_dpsi0[:, None] * self.variances # psi1 grad_mu += (dL_dpsi1[:, :, None] * (Z * self.variances)).sum(1) # psi2 - self._weave_dpsi2_dmuS(dL_dpsi2, Z, posterior_variational, grad_mu, grad_S) + self._weave_dpsi2_dmuS(dL_dpsi2, Z, variational_posterior, grad_mu, grad_S) return grad_mu, grad_S @@ -160,7 +155,7 @@ class Linear(Kern): #--------------------------------------------------# - def _weave_dpsi2_dmuS(self, dL_dpsi2, Z, pv, target_mu, target_S): + def _weave_dpsi2_dmuS(self, dL_dpsi2, Z, vp, target_mu, target_S): # Think N,num_inducing,num_inducing,input_dim ZA = Z * self.variances AZZA = ZA.T[:, None, :, None] * ZA[None, :, None, :] @@ -203,16 +198,15 @@ class Linear(Kern): weave_options = {'headers' : [''], 'extra_compile_args': ['-fopenmp -O3'], #-march=native'], 'extra_link_args' : ['-lgomp']} - - mu = pv.mean + mu = vp.mean N,num_inducing,input_dim,mu = mu.shape[0],Z.shape[0],mu.shape[1],param_to_array(mu) weave.inline(code, support_code=support_code, libraries=['gomp'], arg_names=['N','num_inducing','input_dim','mu','AZZA','AZZA_2','target_mu','target_S','dL_dpsi2'], type_converters=weave.converters.blitz,**weave_options) - def _weave_dpsi2_dZ(self, dL_dpsi2, Z, pv, target): - AZA = self.variances*self._ZAinner(pv, Z) + def _weave_dpsi2_dZ(self, dL_dpsi2, Z, vp, target): + AZA = self.variances*self._ZAinner(vp, Z) code=""" int n,m,mm,q; #pragma omp parallel for private(n,mm,q) @@ -234,23 +228,23 @@ class Linear(Kern): 'extra_compile_args': ['-fopenmp -O3'], #-march=native'], 'extra_link_args' : ['-lgomp']} - N,num_inducing,input_dim = pv.mean.shape[0],Z.shape[0],pv.mean.shape[1] - mu = param_to_array(pv.mean) + N,num_inducing,input_dim = vp.mean.shape[0],Z.shape[0],vp.mean.shape[1] + mu = param_to_array(vp.mean) weave.inline(code, support_code=support_code, libraries=['gomp'], arg_names=['N','num_inducing','input_dim','AZA','target','dL_dpsi2'], type_converters=weave.converters.blitz,**weave_options) - def _mu2S(self, pv): - return np.square(pv.mean) + pv.variance + def _mu2S(self, vp): + return np.square(vp.mean) + vp.variance - def _ZAinner(self, pv, Z): + def _ZAinner(self, vp, Z): ZA = Z*self.variances - inner = (pv.mean[:, None, :] * pv.mean[:, :, None]) - diag_indices = np.diag_indices(pv.mean.shape[1], 2) - inner[:, diag_indices[0], 
diag_indices[1]] += pv.variance + inner = (vp.mean[:, None, :] * vp.mean[:, :, None]) + diag_indices = np.diag_indices(vp.mean.shape[1], 2) + inner[:, diag_indices[0], diag_indices[1]] += vp.variance - return np.dot(ZA, inner).swapaxes(0, 1) # NOTE: self.ZAinner \in [num_inducing x N x input_dim]! + return np.dot(ZA, inner).swapaxes(0, 1) # NOTE: self.ZAinner \in [num_inducing x num_data x input_dim]! def input_sensitivity(self): if self.ARD: return self.variances diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index 1d033f70..bb809356 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -42,10 +42,6 @@ class Prod(Kern): self.k1.update_gradients_full(dL_dK*self.k2(X[:,self.slice2]), X[:,self.slice1]) self.k2.update_gradients_full(dL_dK*self.k1(X[:,self.slice1]), X[:,self.slice2]) - def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z): - self.k1.update_gradients_sparse(dL_dKmm * self.k2.K(Z[:,self.slice2]), dL_dKnm * self.k2(X[:,self.slice2], Z[:,self.slice2]), dL_dKdiag * self.k2.Kdiag(X[:,self.slice2]), X[:,self.slice1], Z[:,self.slice1] ) - self.k2.update_gradients_sparse(dL_dKmm * self.k1.K(Z[:,self.slice1]), dL_dKnm * self.k1(X[:,self.slice1], Z[:,self.slice1]), dL_dKdiag * self.k1.Kdiag(X[:,self.slice1]), X[:,self.slice2], Z[:,self.slice2] ) - def gradients_X(self, dL_dK, X, X2=None): target = np.zeros(X.shape) if X2 is None: diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py index 28115fae..7c43b18d 100644 --- a/GPy/kern/_src/rbf.py +++ b/GPy/kern/_src/rbf.py @@ -9,143 +9,73 @@ from ...util.linalg import tdot from ...util.misc import fast_array_equal, param_to_array from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp +from stationary import Stationary -class RBF(Kern): +class RBF(Stationary): """ Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel: .. math:: - k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) \ \ \ \ \ \\text{ where } r^2 = \sum_{i=1}^d \\frac{ (x_i-x^\prime_i)^2}{\ell_i^2} + k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) - where \ell_i is the lengthscale, \sigma^2 the variance and d the dimensionality of the input. - - :param input_dim: the number of input dimensions - :type input_dim: int - :param variance: the variance of the kernel - :type variance: float - :param lengthscale: the vector of lengthscale of the kernel - :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter) - :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension. - :type ARD: Boolean - :rtype: kernel object - - .. 
Note: this object implements both the ARD and 'spherical' version of the function """ - def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='rbf'): - super(RBF, self).__init__(input_dim, name) - self.input_dim = input_dim - self.ARD = ARD - - if not ARD: - if lengthscale is not None: - lengthscale = np.asarray(lengthscale) - assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel" - else: - lengthscale = np.ones(1) - else: - if lengthscale is not None: - lengthscale = np.asarray(lengthscale) - assert lengthscale.size == self.input_dim, "bad number of lengthscales" - else: - lengthscale = np.ones(self.input_dim) - - self.variance = Param('variance', variance, Logexp()) - - self.lengthscale = Param('lengthscale', lengthscale, Logexp()) - self.lengthscale.add_observer(self, self.update_lengthscale) - self.update_lengthscale(self.lengthscale) - - self.add_parameters(self.variance, self.lengthscale) - self.parameters_changed() # initializes cache - + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='RBF'): + super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, name) self.weave_options = {} - def update_lengthscale(self, l): - self.lengthscale2 = np.square(self.lengthscale) + def K_of_r(self, r): + return self.variance * np.exp(-0.5 * r**2) + + def dK_dr(self, r): + return -r*self.K_of_r(r) + + #---------------------------------------# + # PSI statistics # + #---------------------------------------# def parameters_changed(self): # reset cached results - self._X, self._X2 = np.empty(shape=(2, 1)) self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S - def K(self, X, X2=None): - self._K_computations(X, X2) - return self.variance * self._K_dvar - def Kdiag(self, X): - ret = np.ones(X.shape[0]) - ret[:] = self.variance - return ret + def psi0(self, Z, variational_posterior): + return self.Kdiag(variational_posterior.mean) - def psi0(self, Z, posterior_variational): - mu = posterior_variational.mean - ret = np.empty(mu.shape[0], dtype=np.float64) - ret[:] = self.variance - return ret - - def psi1(self, Z, posterior_variational): - mu = posterior_variational.mean - S = posterior_variational.variance + def psi1(self, Z, variational_posterior): + mu = variational_posterior.mean + S = variational_posterior.variance self._psi_computations(Z, mu, S) return self._psi1 - def psi2(self, Z, posterior_variational): - mu = posterior_variational.mean - S = posterior_variational.variance + def psi2(self, Z, variational_posterior): + mu = variational_posterior.mean + S = variational_posterior.variance self._psi_computations(Z, mu, S) return self._psi2 - def update_gradients_full(self, dL_dK, X): - self._K_computations(X, None) - self.variance.gradient = np.sum(self._K_dvar * dL_dK) - if self.ARD: - self.lengthscale.gradient = self._dL_dlengthscales_via_K(dL_dK, X, None) - else: - self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK) - - def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z): - #contributions from Kdiag - self.variance.gradient = np.sum(dL_dKdiag) - - #from Knm - self._K_computations(X, Z) - self.variance.gradient += np.sum(dL_dKnm * self._K_dvar) - if self.ARD: - self.lengthscale.gradient = self._dL_dlengthscales_via_K(dL_dKnm, X, Z) - - else: - self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKnm) - - #from Kmm - self._K_computations(Z, None) - 
self.variance.gradient += np.sum(dL_dKmm * self._K_dvar) - if self.ARD: - self.lengthscale.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None) - else: - self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm) - - def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): - mu = posterior_variational.mean - S = posterior_variational.variance + def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + mu = variational_posterior.mean + S = variational_posterior.variance self._psi_computations(Z, mu, S) + l2 = self.lengthscale **2 #contributions from psi0: - self.variance.gradient = np.sum(dL_dpsi0) + self.variance.gradient += np.sum(dL_dpsi0) #from psi1 self.variance.gradient += np.sum(dL_dpsi1 * self._psi1 / self.variance) d_length = self._psi1[:,:,None] * ((self._psi1_dist_sq - 1.)/(self.lengthscale*self._psi1_denom) +1./self.lengthscale) dpsi1_dlength = d_length * dL_dpsi1[:, :, None] if not self.ARD: - self.lengthscale.gradient = dpsi1_dlength.sum() + self.lengthscale.gradient += dpsi1_dlength.sum() else: - self.lengthscale.gradient = dpsi1_dlength.sum(0).sum(0) + self.lengthscale.gradient += dpsi1_dlength.sum(0).sum(0) #from psi2 d_var = 2.*self._psi2 / self.variance - d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / self.lengthscale2) / (self.lengthscale * self._psi2_denom) + d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / l2) / (self.lengthscale * self._psi2_denom) self.variance.gradient += np.sum(dL_dpsi2 * d_var) dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None] @@ -154,87 +84,45 @@ class RBF(Kern): else: self.lengthscale.gradient += dpsi2_dlength.sum(0).sum(0).sum(0) - #from Kmm - self._K_computations(Z, None) - self.variance.gradient += np.sum(dL_dKmm * self._K_dvar) - if self.ARD: - self.lengthscale.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None) - else: - self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm) - - def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): - mu = posterior_variational.mean - S = posterior_variational.variance + def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + mu = variational_posterior.mean + S = variational_posterior.variance self._psi_computations(Z, mu, S) + l2 = self.lengthscale **2 #psi1 - denominator = (self.lengthscale2 * (self._psi1_denom)) + denominator = (l2 * (self._psi1_denom)) dpsi1_dZ = -self._psi1[:, :, None] * ((self._psi1_dist / denominator)) grad = np.sum(dL_dpsi1[:, :, None] * dpsi1_dZ, 0) #psi2 - term1 = self._psi2_Zdist / self.lengthscale2 # num_inducing, num_inducing, input_dim - term2 = self._psi2_mudist / self._psi2_denom / self.lengthscale2 # N, num_inducing, num_inducing, input_dim + term1 = self._psi2_Zdist / l2 # num_inducing, num_inducing, input_dim + term2 = self._psi2_mudist / self._psi2_denom / l2 # N, num_inducing, num_inducing, input_dim dZ = self._psi2[:, :, :, None] * (term1[None] + term2) grad += 2*(dL_dpsi2[:, :, :, None] * dZ).sum(0).sum(0) - grad += self.gradients_X(dL_dKmm, Z, None) - return grad - def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): - mu = posterior_variational.mean - S = 
posterior_variational.variance + def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + mu = variational_posterior.mean + S = variational_posterior.variance self._psi_computations(Z, mu, S) + l2 = self.lengthscale **2 #psi1 - tmp = self._psi1[:, :, None] / self.lengthscale2 / self._psi1_denom + tmp = self._psi1[:, :, None] / l2 / self._psi1_denom grad_mu = np.sum(dL_dpsi1[:, :, None] * tmp * self._psi1_dist, 1) grad_S = np.sum(dL_dpsi1[:, :, None] * 0.5 * tmp * (self._psi1_dist_sq - 1), 1) #psi2 - tmp = self._psi2[:, :, :, None] / self.lengthscale2 / self._psi2_denom + tmp = self._psi2[:, :, :, None] / l2 / self._psi2_denom grad_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1) grad_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1) return grad_mu, grad_S - def gradients_X(self, dL_dK, X, X2=None): - #if self._X is None or X.base is not self._X.base or X2 is not None: - self._K_computations(X, X2) - if X2 is None: - _K_dist = 2*(X[:, None, :] - X[None, :, :]) - else: - _K_dist = X[:, None, :] - X2[None, :, :] # don't cache this in _K_computations because it is high memory. If this function is being called, chances are we're not in the high memory arena. - gradients_X = (-self.variance / self.lengthscale2) * np.transpose(self._K_dvar[:, :, np.newaxis] * _K_dist, (1, 0, 2)) - return np.sum(gradients_X * dL_dK.T[:, :, None], 0) - - def dKdiag_dX(self, dL_dKdiag, X): - return np.zeros(X.shape[0]) - - #---------------------------------------# - # PSI statistics # - #---------------------------------------# - #---------------------------------------# # Precomputations # #---------------------------------------# - def _K_computations(self, X, X2): - #params = self._get_params() - if not (fast_array_equal(X, self._X) and fast_array_equal(X2, self._X2)):# and fast_array_equal(self._params_save , params)): - #self._X = X.copy() - #self._params_save = params.copy() - if X2 is None: - self._X2 = None - X = X / self.lengthscale - Xsquare = np.sum(np.square(X), 1) - self._K_dist2 = -2.*tdot(X) + (Xsquare[:, None] + Xsquare[None, :]) - else: - self._X2 = X2.copy() - X = X / self.lengthscale - X2 = X2 / self.lengthscale - self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :]) - self._K_dvar = np.exp(-0.5 * self._K_dist2) - def _dL_dlengthscales_via_K(self, dL_dK, X, X2): """ A helper function for update_gradients_* methods @@ -301,19 +189,20 @@ class RBF(Kern): if Z_changed or not fast_array_equal(mu, self._mu) or not fast_array_equal(S, self._S): # something's changed. recompute EVERYTHING + l2 = self.lengthscale **2 # psi1 - self._psi1_denom = S[:, None, :] / self.lengthscale2 + 1. + self._psi1_denom = S[:, None, :] / l2 + 1. self._psi1_dist = Z[None, :, :] - mu[:, None, :] - self._psi1_dist_sq = np.square(self._psi1_dist) / self.lengthscale2 / self._psi1_denom + self._psi1_dist_sq = np.square(self._psi1_dist) / l2 / self._psi1_denom self._psi1_exponent = -0.5 * np.sum(self._psi1_dist_sq + np.log(self._psi1_denom), -1) self._psi1 = self.variance * np.exp(self._psi1_exponent) # psi2 - self._psi2_denom = 2.*S[:, None, None, :] / self.lengthscale2 + 1. # N,M,M,Q + self._psi2_denom = 2.*S[:, None, None, :] / l2 + 1. 
# N,M,M,Q
             self._psi2_mudist, self._psi2_mudist_sq, self._psi2_exponent, _ = self.weave_psi2(mu, self._psi2_Zhat)
             # self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,M,M,Q
-            # self._psi2_mudist_sq = np.square(self._psi2_mudist)/(self.lengthscale2*self._psi2_denom)
+            # self._psi2_mudist_sq = np.square(self._psi2_mudist)/(l2*self._psi2_denom)
             # self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,M,M,Q
             self._psi2 = np.square(self.variance) * np.exp(self._psi2_exponent) # N,M,M,Q
@@ -332,11 +221,11 @@ class RBF(Kern):
         psi2_Zdist_sq = self._psi2_Zdist_sq
         _psi2_denom = self._psi2_denom.squeeze().reshape(N, self.input_dim)
         half_log_psi2_denom = 0.5 * np.log(self._psi2_denom).squeeze().reshape(N, self.input_dim)
-        variance_sq = float(np.square(self.variance))
+        variance_sq = np.float64(np.square(self.variance))
         if self.ARD:
-            lengthscale2 = self.lengthscale2
+            lengthscale2 = self.lengthscale **2
         else:
-            lengthscale2 = np.ones(input_dim) * self.lengthscale2
+            lengthscale2 = np.ones(input_dim) * self.lengthscale**2

         code = """
        double tmp;
diff --git a/GPy/kern/_src/rbfcos.py b/GPy/kern/_src/rbfcos.py
deleted file mode 100644
index 9a4b8ab2..00000000
--- a/GPy/kern/_src/rbfcos.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright (c) 2012, James Hensman and Andrew Gordon Wilson
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-
-from kernpart import Kernpart
-import numpy as np
-from ...core.parameterization import Param
-
-class RBFCos(Kernpart):
-    def __init__(self,input_dim,variance=1.,frequencies=None,bandwidths=None,ARD=False):
-        self.input_dim = input_dim
-        self.name = 'rbfcos'
-        if self.input_dim>10:
-            print "Warning: the rbfcos kernel requires a lot of memory for high dimensional inputs"
-        self.ARD = ARD
-
-        #set the default frequencies and bandwidths, appropriate num_params
-        if ARD:
-            self.num_params = 2*self.input_dim + 1
-            if frequencies is not None:
-                frequencies = np.asarray(frequencies)
-                assert frequencies.size == self.input_dim, "bad number of frequencies"
-            else:
-                frequencies = np.ones(self.input_dim)
-            if bandwidths is not None:
-                bandwidths = np.asarray(bandwidths)
-                assert bandwidths.size == self.input_dim, "bad number of bandwidths"
-            else:
-                bandwidths = np.ones(self.input_dim)
-        else:
-            self.num_params = 3
-            if frequencies is not None:
-                frequencies = np.asarray(frequencies)
-                assert frequencies.size == 1, "Exactly one frequency needed for non-ARD kernel"
-            else:
-                frequencies = np.ones(1)
-
-            if bandwidths is not None:
-                bandwidths = np.asarray(bandwidths)
-                assert bandwidths.size == 1, "Exactly one bandwidth needed for non-ARD kernel"
-            else:
-                bandwidths = np.ones(1)
-
-        self.variance = Param('variance', variance)
-        self.frequencies = Param('frequencies', frequencies)
-        self.bandwidths = Param('bandwidths', bandwidths)
-
-        #initialise cache
-        self._X, self._X2 = np.empty(shape=(3,1))
-
-# def _get_params(self):
-#     return np.hstack((self.variance,self.frequencies, self.bandwidths))
-
-# def _set_params(self,x):
-#     assert x.size==(self.num_params)
-#     if self.ARD:
-#         self.variance = x[0]
-#         self.frequencies = x[1:1+self.input_dim]
-#         self.bandwidths = x[1+self.input_dim:]
-#     else:
-#         self.variance, self.frequencies, self.bandwidths = x
-
-# def _get_param_names(self):
-#     if self.num_params == 3:
-#         return ['variance','frequency','bandwidth']
-#     else:
-#         return ['variance']+['frequency_%i'%i for i in range(self.input_dim)]+['bandwidth_%i'%i for i in range(self.input_dim)]
-
-    def K(self,X,X2,target):
-
self._K_computations(X,X2) - target += self.variance*self._dvar - - def Kdiag(self,X,target): - np.add(target,self.variance,target) - - def _param_grad_helper(self,dL_dK,X,X2,target): - self._K_computations(X,X2) - target[0] += np.sum(dL_dK*self._dvar) - if self.ARD: - for q in xrange(self.input_dim): - target[q+1] += -2.*np.pi*self.variance*np.sum(dL_dK*self._dvar*np.tan(2.*np.pi*self._dist[:,:,q]*self.frequencies[q])*self._dist[:,:,q]) - target[q+1+self.input_dim] += -2.*np.pi**2*self.variance*np.sum(dL_dK*self._dvar*self._dist2[:,:,q]) - else: - target[1] += -2.*np.pi*self.variance*np.sum(dL_dK*self._dvar*np.sum(np.tan(2.*np.pi*self._dist*self.frequencies)*self._dist,-1)) - target[2] += -2.*np.pi**2*self.variance*np.sum(dL_dK*self._dvar*self._dist2.sum(-1)) - - - def dKdiag_dtheta(self,dL_dKdiag,X,target): - target[0] += np.sum(dL_dKdiag) - - def gradients_X(self,dL_dK,X,X2,target): - #TODO!!! - raise NotImplementedError - - def dKdiag_dX(self,dL_dKdiag,X,target): - pass - - def parameters_changed(self): - self._rbf_part = np.exp(-2.*np.pi**2*np.sum(self._dist2*self.bandwidths,-1)) - self._cos_part = np.prod(np.cos(2.*np.pi*self._dist*self.frequencies),-1) - self._dvar = self._rbf_part*self._cos_part - - def _K_computations(self,X,X2): - if not (np.all(X==self._X) and np.all(X2==self._X2)): - if X2 is None: X2 = X - self._X = X.copy() - self._X2 = X2.copy() - - #do the distances: this will be high memory for large input_dim - #NB: we don't take the abs of the dist because cos is symmetric - self._dist = X[:,None,:] - X2[None,:,:] - self._dist2 = np.square(self._dist) - - #ensure the next section is computed: - self._params = np.empty(self.num_params) diff --git a/GPy/kern/_src/ssrbf.py b/GPy/kern/_src/ssrbf.py new file mode 100644 index 00000000..60df7225 --- /dev/null +++ b/GPy/kern/_src/ssrbf.py @@ -0,0 +1,252 @@ +# Copyright (c) 2012, GPy authors (see AUTHORS.txt). +# Licensed under the BSD 3-clause license (see LICENSE.txt) + + +from kern import Kern +import numpy as np +from ...util.linalg import tdot +from ...util.config import * +from ...util.caching import cache_this +from stationary import Stationary + +class SSRBF(Stationary): + """ + Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel + for Spike-and-Slab GPLVM + + .. math:: + + k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) \ \ \ \ \ \\text{ where } r^2 = \sum_{i=1}^d \\frac{ (x_i-x^\prime_i)^2}{\ell_i^2} + + where \ell_i is the lengthscale, \sigma^2 the variance and d the dimensionality of the input. + + :param input_dim: the number of input dimensions + :type input_dim: int + :param variance: the variance of the kernel + :type variance: float + :param lengthscale: the vector of lengthscale of the kernel + :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter) + :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension. + :type ARD: Boolean + :rtype: kernel object + + .. Note: this object implements both the ARD and 'spherical' version of the function + """ + + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=True, name='SSRBF'): + assert ARD==True, "Not Implemented!" 
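+        # Usage sketch (names here are illustrative only): the variational
+        # posterior handed to the psi-statistics below must carry the
+        # spike-and-slab statistics, i.e. .mean, .variance and .binary_prob,
+        # each of shape (num_data, input_dim), e.g.
+        #     k = SSRBF(Q, lengthscale=np.ones(Q), ARD=True)
+        #     psi1 = k.psi1(Z, qX)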
+ super(SSRBF, self).__init__(input_dim, variance, lengthscale, ARD, name) + + def K_of_r(self, r): + return self.variance * np.exp(-0.5 * r**2) + + def dK_dr(self, r): + return -r*self.K_of_r(r) + + def parameters_changed(self): + pass + + def Kdiag(self, X): + ret = np.empty(X.shape[0]) + ret[:] = self.variance + return ret + + #---------------------------------------# + # PSI statistics # + #---------------------------------------# + + def psi0(self, Z, posterior_variational): + ret = np.empty(posterior_variational.mean.shape[0]) + ret[:] = self.variance + return ret + + def psi1(self, Z, posterior_variational): + self._psi_computations(Z, posterior_variational.mean, posterior_variational.variance, posterior_variational.binary_prob) + return self._psi1 + + def psi2(self, Z, posterior_variational): + self._psi_computations(Z, posterior_variational.mean, posterior_variational.variance, posterior_variational.binary_prob) + return self._psi2 + + def dL_dpsi0_dmuSgamma(self, dL_dpsi0, Z, mu, S, gamma, target_mu, target_S, target_gamma): + pass + + + def dL_dpsi1_dmuSgamma(self, dL_dpsi1, Z, mu, S, gamma, target_mu, target_S, target_gamma): + self._psi_computations(Z, mu, S, gamma) + target_mu += (dL_dpsi1[:, :, None] * self._dpsi1_dmu).sum(axis=1) + target_S += (dL_dpsi1[:, :, None] * self._dpsi1_dS).sum(axis=1) + target_gamma += (dL_dpsi1[:,:,None] * self._dpsi1_dgamma).sum(axis=1) + + + def dL_dpsi2_dmuSgamma(self, dL_dpsi2, Z, mu, S, gamma, target_mu, target_S, target_gamma): + """Think N,num_inducing,num_inducing,input_dim """ + self._psi_computations(Z, mu, S, gamma) + target_mu += (dL_dpsi2[:, :, :, None] * self._dpsi2_dmu).reshape(mu.shape[0],-1,mu.shape[1]).sum(axis=1) + target_S += (dL_dpsi2[:, :, :, None] * self._dpsi2_dS).reshape(S.shape[0],-1,S.shape[1]).sum(axis=1) + target_gamma += (dL_dpsi2[:,:,:, None] *self._dpsi2_dgamma).reshape(gamma.shape[0],-1,gamma.shape[1]).sum(axis=1) + + def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): + self._psi_computations(Z, posterior_variational.mean, posterior_variational.variance, posterior_variational.binary_prob) + + #contributions from psi0: + self.variance.gradient = np.sum(dL_dpsi0) + + #from psi1 + self.variance.gradient += np.sum(dL_dpsi1 * self._dpsi1_dvariance) + self.lengthscale.gradient = (dL_dpsi1[:,:,None]*self._dpsi1_dlengthscale).reshape(-1,self.input_dim).sum(axis=0) + + + #from psi2 + self.variance.gradient += (dL_dpsi2 * self._dpsi2_dvariance).sum() + self.lengthscale.gradient += (dL_dpsi2[:,:,:,None] * self._dpsi2_dlengthscale).reshape(-1,self.input_dim).sum(axis=0) + + #from Kmm + self._K_computations(Z, None) + dvardLdK = self._K_dvar * dL_dKmm + var_len3 = self.variance / (np.square(self.lengthscale)*self.lengthscale) + + self.variance.gradient += np.sum(dvardLdK) + self.lengthscale.gradient += (np.square(Z[:,None,:]-Z[None,:,:])*dvardLdK[:,:,None]).reshape(-1,self.input_dim).sum(axis=0)*var_len3 + + + def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): + self._psi_computations(Z, posterior_variational.mean, posterior_variational.variance, posterior_variational.binary_prob) + + #psi1 + grad = (dL_dpsi1[:, :, None] * self._dpsi1_dZ).sum(axis=0) + + #psi2 + grad += (dL_dpsi2[:, :, :, None] * self._dpsi2_dZ).sum(axis=0).sum(axis=1) + + grad += self.gradients_X(dL_dKmm, Z, None) + + return grad + + def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational): + ndata = 
posterior_variational.mean.shape[0]
+        self._psi_computations(Z, posterior_variational.mean, posterior_variational.variance, posterior_variational.binary_prob)
+        #psi1
+        grad_mu = (dL_dpsi1[:, :, None] * self._dpsi1_dmu).sum(axis=1)
+        grad_S = (dL_dpsi1[:, :, None] * self._dpsi1_dS).sum(axis=1)
+        grad_gamma = (dL_dpsi1[:,:,None] * self._dpsi1_dgamma).sum(axis=1)
+        #psi2
+        grad_mu += (dL_dpsi2[:, :, :, None] * self._dpsi2_dmu).reshape(ndata,-1,self.input_dim).sum(axis=1)
+        grad_S += (dL_dpsi2[:, :, :, None] * self._dpsi2_dS).reshape(ndata,-1,self.input_dim).sum(axis=1)
+        grad_gamma += (dL_dpsi2[:,:,:, None] *self._dpsi2_dgamma).reshape(ndata,-1,self.input_dim).sum(axis=1)
+
+        return grad_mu, grad_S, grad_gamma
+
+    def gradients_X(self, dL_dK, X, X2=None):
+        #if self._X is None or X.base is not self._X.base or X2 is not None:
+        if X2 is None:
+            _K_dist = X[:,None,:] - X[None,:,:]
+            _K_dist2 = np.square(_K_dist/self.lengthscale).sum(axis=-1)
+            dK_dX = self.variance*np.exp(-0.5 * _K_dist2[:,:,None]) * (-2.*_K_dist/np.square(self.lengthscale))
+            dL_dX = (dL_dK[:,:,None] * dK_dX).sum(axis=1)
+        else:
+            _K_dist = X[:,None,:] - X2[None,:,:]
+            _K_dist2 = np.square(_K_dist/self.lengthscale).sum(axis=-1)
+            dK_dX = self.variance*np.exp(-0.5 * _K_dist2[:,:,None]) * (-_K_dist/np.square(self.lengthscale))
+            dL_dX = (dL_dK[:,:,None] * dK_dX).sum(axis=1)
+        return dL_dX
+
+    #---------------------------------------#
+    #            Precomputations            #
+    #---------------------------------------#
+
+    @cache_this(1)
+    def _K_computations(self, X, X2):
+        """
+        K(X,X2) - X is NxQ
+        Q -> input dimension (self.input_dim)
+        """
+        if X2 is None:
+            self._X2 = None
+
+            X = X / self.lengthscale
+            Xsquare = np.sum(np.square(X), axis=1)
+            self._K_dist2 = -2.*tdot(X) + (Xsquare[:, None] + Xsquare[None, :])
+        else:
+            self._X2 = X2.copy()
+
+            X = X / self.lengthscale
+            X2 = X2 / self.lengthscale
+            self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X), axis=1)[:, None] + np.sum(np.square(X2), axis=1)[None, :])
+        self._K_dvar = np.exp(-0.5 * self._K_dist2)
+
+    @cache_this(1)
+    def _psi_computations(self, Z, mu, S, gamma):
+        """
+        Z - MxQ
+        mu - NxQ
+        S - NxQ
+        gamma - NxQ
+        """
+        # here are the "statistics" for psi1 and psi2
+        # Produced intermediate results:
+        # _psi1                    NxM
+        # _dpsi1_dvariance         NxM
+        # _dpsi1_dlengthscale      NxMxQ
+        # _dpsi1_dZ                NxMxQ
+        # _dpsi1_dgamma            NxMxQ
+        # _dpsi1_dmu               NxMxQ
+        # _dpsi1_dS                NxMxQ
+        # _psi2                    NxMxM
+        # _psi2_dvariance          NxMxM
+        # _psi2_dlengthscale       NxMxMxQ
+        # _psi2_dZ                 NxMxMxQ
+        # _psi2_dgamma             NxMxMxQ
+        # _psi2_dmu                NxMxMxQ
+        # _psi2_dS                 NxMxMxQ
+
+        lengthscale2 = np.square(self.lengthscale)
+
+        _psi2_Zhat = 0.5 * (Z[:, None, :] + Z[None, :, :]) # M,M,Q
+        _psi2_Zdist = 0.5 * (Z[:, None, :] - Z[None, :, :]) # M,M,Q
+        _psi2_Zdist_sq = np.square(_psi2_Zdist / self.lengthscale) # M,M,Q
+        _psi2_Z_sq_sum = (np.square(Z[:,None,:])+np.square(Z[None,:,:]))/lengthscale2 # MxMxQ
+
+        # psi1
+        _psi1_denom = S[:, None, :] / lengthscale2 + 1.
# Nx1xQ + _psi1_denom_sqrt = np.sqrt(_psi1_denom) #Nx1xQ + _psi1_dist = Z[None, :, :] - mu[:, None, :] # NxMxQ + _psi1_dist_sq = np.square(_psi1_dist) / (lengthscale2 * _psi1_denom) # NxMxQ + _psi1_common = gamma[:,None,:] / (lengthscale2*_psi1_denom*_psi1_denom_sqrt) #Nx1xQ + _psi1_exponent1 = np.log(gamma[:,None,:]) -0.5 * (_psi1_dist_sq + np.log(_psi1_denom)) # NxMxQ + _psi1_exponent2 = np.log(1.-gamma[:,None,:]) -0.5 * (np.square(Z[None,:,:])/lengthscale2) # NxMxQ + _psi1_exponent = np.log(np.exp(_psi1_exponent1) + np.exp(_psi1_exponent2)) #NxMxQ + _psi1_exp_sum = _psi1_exponent.sum(axis=-1) #NxM + _psi1_exp_dist_sq = np.exp(-0.5*_psi1_dist_sq) # NxMxQ + _psi1_exp_Z = np.exp(-0.5*np.square(Z[None,:,:])/lengthscale2) # 1xMxQ + _psi1_q = self.variance * np.exp(_psi1_exp_sum[:,:,None] - _psi1_exponent) # NxMxQ + self._psi1 = self.variance * np.exp(_psi1_exp_sum) # NxM + self._dpsi1_dvariance = self._psi1 / self.variance # NxM + self._dpsi1_dgamma = _psi1_q * (_psi1_exp_dist_sq/_psi1_denom_sqrt-_psi1_exp_Z) # NxMxQ + self._dpsi1_dmu = _psi1_q * (_psi1_exp_dist_sq * _psi1_dist * _psi1_common) # NxMxQ + self._dpsi1_dS = _psi1_q * (_psi1_exp_dist_sq * _psi1_common * 0.5 * (_psi1_dist_sq - 1.)) # NxMxQ + self._dpsi1_dZ = _psi1_q * (- _psi1_common * _psi1_dist * _psi1_exp_dist_sq - (1-gamma[:,None,:])/lengthscale2*Z[None,:,:]*_psi1_exp_Z) # NxMxQ + self._dpsi1_dlengthscale = 2.*self.lengthscale*_psi1_q * (0.5*_psi1_common*(S[:,None,:]/lengthscale2+_psi1_dist_sq)*_psi1_exp_dist_sq + 0.5*(1-gamma[:,None,:])*np.square(Z[None,:,:]/lengthscale2)*_psi1_exp_Z) # NxMxQ + + + # psi2 + _psi2_denom = 2.*S[:, None, None, :] / lengthscale2 + 1. # Nx1x1xQ + _psi2_denom_sqrt = np.sqrt(_psi2_denom) + _psi2_mudist = mu[:,None,None,:]-_psi2_Zhat #N,M,M,Q + _psi2_mudist_sq = np.square(_psi2_mudist)/(lengthscale2*_psi2_denom) + _psi2_common = gamma[:,None,None,:]/(lengthscale2 * _psi2_denom * _psi2_denom_sqrt) # Nx1x1xQ + _psi2_exponent1 = -_psi2_Zdist_sq -_psi2_mudist_sq -0.5*np.log(_psi2_denom)+np.log(gamma[:,None,None,:]) #N,M,M,Q + _psi2_exponent2 = np.log(1.-gamma[:,None,None,:]) - 0.5*(_psi2_Z_sq_sum) # NxMxMxQ + _psi2_exponent = np.log(np.exp(_psi2_exponent1) + np.exp(_psi2_exponent2)) + _psi2_exp_sum = _psi2_exponent.sum(axis=-1) #NxM + _psi2_q = np.square(self.variance) * np.exp(_psi2_exp_sum[:,:,:,None]-_psi2_exponent) # NxMxMxQ + _psi2_exp_dist_sq = np.exp(-_psi2_Zdist_sq -_psi2_mudist_sq) # NxMxMxQ + _psi2_exp_Z = np.exp(-0.5*_psi2_Z_sq_sum) # MxMxQ + self._psi2 = np.square(self.variance) * np.exp(_psi2_exp_sum) # N,M,M + self._dpsi2_dvariance = 2. * self._psi2/self.variance # NxMxM + self._dpsi2_dgamma = _psi2_q * (_psi2_exp_dist_sq/_psi2_denom_sqrt - _psi2_exp_Z) # NxMxMxQ + self._dpsi2_dmu = _psi2_q * (-2.*_psi2_common*_psi2_mudist * _psi2_exp_dist_sq) # NxMxMxQ + self._dpsi2_dS = _psi2_q * (_psi2_common * (2.*_psi2_mudist_sq - 1.) 
* _psi2_exp_dist_sq) # NxMxMxQ + self._dpsi2_dZ = 2.*_psi2_q * (_psi2_common*(-_psi2_Zdist*_psi2_denom+_psi2_mudist)*_psi2_exp_dist_sq - (1-gamma[:,None,None,:])*Z[:,None,:]/lengthscale2*_psi2_exp_Z) # NxMxMxQ + self._dpsi2_dlengthscale = 2.*self.lengthscale* _psi2_q * (_psi2_common*(S[:,None,None,:]/lengthscale2+_psi2_Zdist_sq*_psi2_denom+_psi2_mudist_sq)*_psi2_exp_dist_sq+(1-gamma[:,None,None,:])*_psi2_Z_sq_sum*0.5/lengthscale2*_psi2_exp_Z) # NxMxMxQ + \ No newline at end of file diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py index 09ab0ded..135e3f9e 100644 --- a/GPy/kern/_src/static.py +++ b/GPy/kern/_src/static.py @@ -3,6 +3,7 @@ from kern import Kern +import numpy as np from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp import numpy as np @@ -18,32 +19,32 @@ class Static(Kern): ret[:] = self.variance return ret - def gradients_X(self, dL_dK, X, X2, target): + def gradients_X(self, dL_dK, X, X2=None): return np.zeros(X.shape) - def gradients_X_diag(self, dL_dKdiag, X, target): + def gradients_X_diag(self, dL_dKdiag, X): return np.zeros(X.shape) - def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z): + def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior): return np.zeros(Z.shape) - def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z): - return np.zeros(mu.shape), np.zeros(S.shape) + def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape) - def psi0(self, Z, mu, S): - return self.Kdiag(mu) + def psi0(self, Z, variational_posterior): + return self.Kdiag(variational_posterior.mean) - def psi1(self, Z, mu, S, target): - return self.K(mu, Z) + def psi1(self, Z, variational_posterior): + return self.K(variational_posterior.mean, Z) - def psi2(Z, mu, S): - K = self.K(mu, Z) + def psi2(self, Z, variational_posterior): + K = self.K(variational_posterior.mean, Z) return K[:,:,None]*K[:,None,:] # NB. 
more efficient implementations on inheriting classes

 class White(Static):
     def __init__(self, input_dim, variance=1., name='white'):
-        super(White, self).__init__(input_dim, name)
+        super(White, self).__init__(input_dim, variance, name)

     def K(self, X, X2=None):
         if X2 is None:
@@ -51,8 +52,8 @@ class White(Static):
         else:
             return np.zeros((X.shape[0], X2.shape[0]))

-    def psi2(self, Z, mu, S, target):
-        return np.zeros((mu.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+    def psi2(self, Z, variational_posterior):
+        return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)

     def update_gradients_full(self, dL_dK, X):
         self.variance.gradient = np.trace(dL_dK)
@@ -60,13 +61,13 @@ class White(Static):
     def update_gradients_diag(self, dL_dKdiag, X):
         self.variance.gradient = dL_dKdiag.sum()

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        self.variance.gradient = np.trace(dL_dKmm) + dL_dpsi0.sum()
+    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        self.variance.gradient = dL_dpsi0.sum()


 class Bias(Static):
     def __init__(self, input_dim, variance=1., name='bias'):
-        super(Bias, self).__init__(input_dim, name)
+        super(Bias, self).__init__(input_dim, variance, name)

     def K(self, X, X2=None):
         shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])
@@ -80,11 +81,11 @@ class Bias(Static):

     def update_gradients_diag(self, dL_dKdiag, X):
-        self.variance.gradient = dL_dK.sum()
+        self.variance.gradient = dL_dKdiag.sum()

-    def psi2(self, Z, mu, S, target):
-        ret = np.empty((mu.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+    def psi2(self, Z, variational_posterior):
+        ret = np.empty((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
         ret[:] = self.variance**2
         return ret

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        self.variance.gradient = dL_dKmm.sum() + dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()
+    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()
diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py
index 3b8e391b..2d0d284a 100644
--- a/GPy/kern/_src/stationary.py
+++ b/GPy/kern/_src/stationary.py
@@ -5,6 +5,7 @@
 from kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
+from ...util.linalg import tdot
 from ... import util
 import numpy as np
 from scipy import integrate
@@ -33,10 +34,10 @@ class Stationary(Kern):
         self.add_parameters(self.variance, self.lengthscale)

     def K_of_r(self, r):
-        raise NotImplementedError, "implement the covaraiance functino and a fn of r to use this class"
+        raise NotImplementedError, "implement the covariance function as a fn of r to use this class"

     def dK_dr(self, r):
-        raise NotImplementedError, "implement the covaraiance functino and a fn of r to use this class"
+        raise NotImplementedError, "implement the covariance function as a fn of r to use this class"

     def K(self, X, X2=None):
         r = self._scaled_dist(X, X2)
@@ -47,8 +48,44 @@ class Stationary(Kern):
             X2 = X
         return X[:, None, :] - X2[None, :, :]

+    def _unscaled_dist(self, X, X2=None):
+        """
+        Compute the Euclidean distance between each row of X and X2, or between
+        each pair of rows of X if X2 is None.
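+
+        In symbols, with i indexing the rows of X and j the rows of X2:
+
+            r_ij = sqrt( sum_q (X_iq - X2_jq)^2 )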
+ """ + if X2 is None: + Xsq = np.sum(np.square(X),1) + return np.sqrt(-2.*tdot(X) + (Xsq[:,None] + Xsq[None,:])) + else: + X1sq = np.sum(np.square(X),1) + X2sq = np.sum(np.square(X2),1) + return np.sqrt(-2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:])) + def _scaled_dist(self, X, X2=None): - return np.sqrt(np.sum(np.square(self._dist(X, X2) / self.lengthscale), -1)) + """ + Efficiently compute the scaled distance, r. + + r = \sum_{q=1}^Q (x_q - x'q)^2/l_q^2 + + Note that if thre is only one lengthscale, l comes outside the sum. In + this case we compute the unscaled distance first (in a separate + function for caching) and divide by lengthscale afterwards + + """ + if self.ARD: + if X2 is None: + Xl = X/self.lengthscale + Xsq = np.sum(np.square(Xl),1) + return np.sqrt(np.sqrt(-2.*tdot(Xl) +(Xsq[:,None] + Xsq[None,:]))) + else: + X1l = X/self.lengthscale + X2l = X2/self.lengthscale + X1sq = np.sum(np.square(X1l),1) + X2sq = np.sum(np.square(X2l),1) + return np.sqrt(-2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:])) + else: + return self._unscaled_dist(X, X2)/self.lengthscale + def Kdiag(self, X): ret = np.empty(X.shape[0]) @@ -65,16 +102,22 @@ class Stationary(Kern): rinv = self._inv_dist(X, X2) dL_dr = self.dK_dr(r) * dL_dK - x_xl3 = np.square(self._dist(X, X2)) / self.lengthscale**3 if self.ARD: + x_xl3 = np.square(self._dist(X, X2)) / self.lengthscale**3 self.lengthscale.gradient = -((dL_dr*rinv)[:,:,None]*x_xl3).sum(0).sum(0) else: + x_xl3 = np.square(self._dist(X, X2)) / self.lengthscale**3 self.lengthscale.gradient = -((dL_dr*rinv)[:,:,None]*x_xl3).sum() self.variance.gradient = np.sum(K * dL_dK)/self.variance def _inv_dist(self, X, X2=None): + """ + Compute the elementwise inverse of the distance matrix, expecpt on the + diagonal, where we return zero (the distance on the diagonal is zero). + This term appears in derviatives. + """ dist = self._scaled_dist(X, X2) if X2 is None: nondiag = util.diag.offdiag_view(dist) @@ -84,12 +127,25 @@ class Stationary(Kern): return 1./np.where(dist != 0., dist, np.inf) def gradients_X(self, dL_dK, X, X2=None): + """ + Given the derivative of the objective wrt K (dL_dK), compute the derivative wrt X + """ r = self._scaled_dist(X, X2) - dL_dr = self.dK_dr(r) * dL_dK invdist = self._inv_dist(X, X2) - ret = np.sum((invdist*dL_dr)[:,:,None]*self._dist(X, X2),1)/self.lengthscale**2 + dL_dr = self.dK_dr(r) * dL_dK + #The high-memory numpy way: ret = np.sum((invdist*dL_dr)[:,:,None]*self._dist(X, X2),1)/self.lengthscale**2 + #if X2 is None: + #ret *= 2. + + #the lower memory way with a loop + tmp = invdist*dL_dr if X2 is None: - ret *= 2. + tmp *= 2. + X2 = X + ret = np.empty(X.shape, dtype=np.float64) + [np.copyto(ret[:,q], np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), 1)) for q in xrange(self.input_dim)] + ret /= self.lengthscale**2 + return ret def gradients_X_diag(self, dL_dKdiag, X): @@ -162,6 +218,8 @@ class Matern52(Stationary): k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} } """ + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Mat52'): + super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, name) def K_of_r(self, r): return self.variance*(1+np.sqrt(5.)*r+5./3*r**2)*np.exp(-np.sqrt(5.)*r) @@ -239,17 +297,23 @@ class RatQuad(Stationary): self.add_parameters(self.power) def K_of_r(self, r): - return self.variance*(1. + r**2/2.)**(-self.power) + r2 = np.power(r, 2.) 
+ return self.variance*np.power(1. + r2/2., -self.power) def dK_dr(self, r): - return -self.variance*self.power*r*(1. + r**2/2)**(-self.power - 1.) + r2 = np.power(r, 2.) + return -self.variance*self.power*r*np.power(1. + r2/2., - self.power - 1.) def update_gradients_full(self, dL_dK, X, X2=None): super(RatQuad, self).update_gradients_full(dL_dK, X, X2) r = self._scaled_dist(X, X2) - r2 = r**2 - dpow = -2.**self.power*(r2 + 2.)**(-self.power)*np.log(0.5*(r2+2.)) - self.power.gradient = np.sum(dL_dK*dpow) - + r2 = np.power(r, 2.) + dK_dpow = -self.variance * np.power(2., self.power) * np.power(r2 + 2., -self.power) * np.log(0.5*(r2+2.)) + grad = np.sum(dL_dK*dK_dpow) + self.power.gradient = grad + + def update_gradients_diag(self, dL_dKdiag, X): + super(RatQuad, self).update_gradients_diag(dL_dKdiag, X) + self.power.gradient = 0. diff --git a/GPy/kern/_src/sympykern.py b/GPy/kern/_src/sympykern.py index 3d6517a8..cf6838c4 100644 --- a/GPy/kern/_src/sympykern.py +++ b/GPy/kern/_src/sympykern.py @@ -2,35 +2,17 @@ try: import sympy as sp sympy_available=True + from sympy.utilities.lambdify import lambdify except ImportError: sympy_available=False exit() -from sympy.core.cache import clear_cache -from sympy.utilities.codegen import codegen - -try: - from scipy import weave - weave_available = True -except ImportError: - weave_available = False - -import os -current_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) -import sys import numpy as np -import re -import tempfile -import pdb -import ast - -from kernpart import Kernpart +from kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp -# TODO have this set up in a set up file! -user_code_storage = tempfile.gettempdir() -class spkern(Kernpart): +class Sympykern(Kern): """ A kernel object, where all the hard work in done by sympy. @@ -51,10 +33,10 @@ class spkern(Kernpart): name='sympykern' if k is None: raise ValueError, "You must provide an argument for the covariance function." - super(spkern, self).__init__(input_dim, name) + super(Sympykern, self).__init__(input_dim, name) self._sp_k = k - + # pull the variable names out of the symbolic covariance function. sp_vars = [e for e in k.atoms() if e.is_Symbol] self._sp_x= sorted([e for e in sp_vars if e.name[0:2]=='x_'],key=lambda x:int(x.name[2:])) @@ -66,6 +48,10 @@ class spkern(Kernpart): assert len(self._sp_x)==len(self._sp_z) x_dim=len(self._sp_x) + self._sp_kdiag = k + for x, z in zip(self._sp_x, self._sp_z): + self._sp_kdiag = self._sp_kdiag.subs(z, x) + # If it is a multi-output covariance, add an input for indexing the outputs. self._real_input_dim = x_dim # Check input dim is number of xs + 1 if output_dim is >1 @@ -97,6 +83,8 @@ class spkern(Kernpart): #setattr(self, theta, np.ones(self.output_dim)) self.num_shared_params = len(self._sp_theta) + for theta_i, theta_j in zip(self._sp_theta_i, self._sp_theta_j): + self._sp_kdiag = self._sp_kdiag.subs(theta_j, theta_i) #self.num_params = self.num_shared_params+self.num_split_params*self.output_dim else: @@ -112,43 +100,33 @@ class spkern(Kernpart): if param is not None: if param.has_key(theta): val = param[theta] - #setattr(self, theta.name, val) setattr(self, theta.name, Param(theta.name, val, None)) self.add_parameters(getattr(self, theta.name)) #deal with param #self._set_params(self._get_params()) # Differentiate with respect to parameters. 
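+        # (The derivatives are assembled below as dictionaries of sympy
+        # expressions keyed by symbol name, then lambdified into numpy
+        # callables in _gen_code.)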
- self._sp_dk_dtheta = [sp.diff(k,theta).simplify() for theta in self._sp_theta] + derivative_arguments = self._sp_x + self._sp_theta if self.output_dim > 1: - self._sp_dk_dtheta_i = [sp.diff(k,theta).simplify() for theta in self._sp_theta_i] - - # differentiate with respect to input variables. - self._sp_dk_dx = [sp.diff(k,xi).simplify() for xi in self._sp_x] - + derivative_arguments += self._sp_theta_i + + self.derivatives = {theta.name : sp.diff(self._sp_k,theta).simplify() for theta in derivative_arguments} + self.diag_derivatives = {theta.name : sp.diff(self._sp_kdiag,theta).simplify() for theta in derivative_arguments} + + + # This gives the parameters for the arg list. + self.arg_list = self._sp_x + self._sp_z + self._sp_theta + self.diag_arg_list = self._sp_x + self._sp_theta + if self.output_dim > 1: + self.arg_list += self._sp_theta_i + self._sp_theta_j + self.diag_arg_list += self._sp_theta_i # psi_stats aren't yet implemented. if False: self.compute_psi_stats() - self._code = {} - # generate the code for the covariance functions self._gen_code() - if weave_available: - if False: - extra_compile_args = ['-ftree-vectorize', '-mssse3', '-ftree-vectorizer-verbose=5'] - else: - extra_compile_args = [] - - self.weave_kwargs = { - 'support_code': None, #self._function_code, - 'include_dirs':[user_code_storage, os.path.join(current_dir,'parts/')], - 'headers':['"sympy_helpers.h"', '"'+self.name+'.h"'], - 'sources':[os.path.join(current_dir,"parts/sympy_helpers.cpp"), os.path.join(user_code_storage, self.name+'.cpp')], - 'extra_compile_args':extra_compile_args, - 'extra_link_args':['-lgomp'], - 'verbose':True} self.parameters_changed() # initializes caches @@ -157,407 +135,128 @@ class spkern(Kernpart): def _gen_code(self): - argument_sequence = self._sp_x+self._sp_z+self._sp_theta - code_list = [('k',self._sp_k)] - # gradients with respect to covariance input - code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)] - # gradient with respect to parameters - code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)] - # gradient with respect to multiple output parameters - if self.output_dim > 1: - argument_sequence += self._sp_theta_i + self._sp_theta_j - code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)] - # generate c functions from sympy objects - if weave_available: - code_type = "C" - else: - code_type = "PYTHON" - # Need to add the sympy_helpers header in here. - (foo_c,self._function_code), (foo_h,self._function_header) = \ - codegen(code_list, - code_type, - self.name, - argument_sequence=argument_sequence) + self._K_function = lambdify(self.arg_list, self._sp_k, 'numpy') + for key in self.derivatives.keys(): + setattr(self, '_K_diff_' + key, lambdify(self.arg_list, self.derivatives[key], 'numpy')) + + self._Kdiag_function = lambdify(self.diag_arg_list, self._sp_kdiag, 'numpy') + for key in self.derivatives.keys(): + setattr(self, '_Kdiag_diff_' + key, lambdify(self.diag_arg_list, self.diag_derivatives[key], 'numpy')) + + def K(self,X,X2=None): + self._K_computations(X, X2) + return self._K_function(**self._arguments) - # Use weave to compute the underlying functions. 
- if weave_available: - # put the header file where we can find it - f = file(os.path.join(user_code_storage, self.name + '.h'),'w') - f.write(self._function_header) - f.close() + def Kdiag(self,X): + self._K_computations(X) + return self._Kdiag_function(**self._diag_arguments) - - if weave_available: - # Substitute any known derivatives which sympy doesn't compute - self._function_code = re.sub('DiracDelta\(.+?,.+?\)','0.0',self._function_code) - # put the cpp file in user code storage (defaults to temp file location) - f = file(os.path.join(user_code_storage, self.name + '.cpp'),'w') - else: - # put the python file in user code storage - f = file(os.path.join(user_code_storage, self.name + '.py'),'w') - f.write(self._function_code) - f.close() - - if weave_available: - # arg_list will store the arguments required for the C code. - input_arg_list = (["X2(i, %s)"%x.name[2:] for x in self._sp_x] - + ["Z2(j, %s)"%z.name[2:] for z in self._sp_z]) - - # for multiple outputs reverse argument list is also required - if self.output_dim>1: - reverse_input_arg_list = list(input_arg_list) - reverse_input_arg_list.reverse() - - # This gives the parameters for the arg list. - param_arg_list = [shared_params.name for shared_params in self._sp_theta] - arg_list = input_arg_list + param_arg_list - - precompute_list=[] - if self.output_dim > 1: - reverse_arg_list= reverse_input_arg_list + list(param_arg_list) - # For multiple outputs, also need the split parameters. - split_param_arg_list = ["%s1(%s)"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i] - split_param_reverse_arg_list = ["%s1(%s)"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i] - arg_list += split_param_arg_list - reverse_arg_list += split_param_reverse_arg_list - # Extract the right output indices from the inputs. - c_define_output_indices = [' '*16 + "int %s=(int)%s(%s, %i);"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])] - precompute_list += c_define_output_indices - reverse_arg_string = ", ".join(reverse_arg_list) - arg_string = ", ".join(arg_list) - precompute_string = "\n".join(precompute_list) - - # Now we use the arguments in code that computes the separate parts. - - # Any precomputations will be done here eventually. - self._precompute = \ - """ - // Precompute code would go here. It will be called when parameters are updated. - """ - - # Here's the code to do the looping for K - self._code['K'] =\ - """ - // _K_code - // Code for computing the covariance function. 
- int i; - int j; - int n = target_array->dimensions[0]; - int num_inducing = target_array->dimensions[1]; - int input_dim = X_array->dimensions[1]; - //#pragma omp parallel for private(j) - for (i=0;idimensions[0]; - int input_dim = X_array->dimensions[1]; - //#pragma omp parallel for - for (i=0;i1: - for i, theta in enumerate(self._sp_theta_i): - grad_func_list = [' '*26 + 'TARGET1(ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(theta.name, arg_string)] - grad_func_list += [' '*26 + 'TARGET1(jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(theta.name, reverse_arg_string)] - grad_func_list = c_define_output_indices+grad_func_list - - grad_func_string = '\n'.join(grad_func_list) - self._code['dK_d' + theta.name] =\ - """ - int i; - int j; - int n = partial_array->dimensions[0]; - int num_inducing = partial_array->dimensions[1]; - int input_dim = X_array->dimensions[1]; - //#pragma omp parallel for private(j) - for (i=0;idimensions[0]; - int input_dim = X_array->dimensions[1]; - for (i=0;idimensions[0]; - int num_inducing = partial_array->dimensions[1]; - int input_dim = X_array->dimensions[1]; - //#pragma omp parallel for private(j) - for (i=0;idimensions[0]; - int input_dim = X_array->dimensions[1]; - for (i=0;i1: - gradX_func_list += c_define_output_indices - gradX_func_list += ["TARGET2(i, %i) += partial[i*num_inducing+j]*dk_dx_%i(%s);"%(q,q,arg_string) for q in range(self._real_input_dim)] - gradX_func_string = "\n".join(gradX_func_list) - - self._code['dK_dX'] = \ - """ - // _dK_dX_code - // Code for computing gradient of covariance with respect to inputs. - int i; - int j; - int n = partial_array->dimensions[0]; - int num_inducing = partial_array->dimensions[1]; - int input_dim = X_array->dimensions[1]; - //#pragma omp parallel for private(j) - for (i=0;idimensions[0]; - int input_dim = X_array->dimensions[1]; - for (int i=0;i1: - arg_names += self._split_theta_names - arg_names += ['output_dim'] - return arg_names - - def _generate_inline(self, code, X, target=None, Z=None, partial=None): - output_dim = self.output_dim - # Need to extract parameters to local variables first - for shared_params in self._sp_theta: - locals()[shared_params.name] = getattr(self, shared_params.name) - - for split_params in self._split_theta_names: - locals()[split_params] = np.asarray(getattr(self, split_params)) - arg_names = self._get_arg_names(target, Z, partial) - - if weave_available: - return weave.inline(code=code, arg_names=arg_names,**self.weave_kwargs) - else: - raise RuntimeError('Weave not available and other variants of sympy covariance not yet implemented') - - def K(self,X,Z,target): - if Z is None: - self._generate_inline(self._code['K_X'], X, target) - else: - self._generate_inline(self._code['K'], X, target, Z) - - - def Kdiag(self,X,target): - self._generate_inline(self._code['Kdiag'], X, target) - def _param_grad_helper(self,partial,X,Z,target): - if Z is None: - self._generate_inline(self._code['dK_dtheta_X'], X, target, Z, partial) - else: - self._generate_inline(self._code['dK_dtheta'], X, target, Z, partial) + pass - def dKdiag_dtheta(self,partial,X,target): - self._generate_inline(self._code['dKdiag_dtheta'], X, target, Z=None, partial=partial).namelocals()[shared_params.name] = getattr(self, shared_params.name) - def gradients_X(self,partial,X,Z,target): - if Z is None: - self._generate_inline(self._code['dK_dX_X'], X, target, Z, partial) - else: - self._generate_inline(self._code['dK_dX'], X, target, Z, partial) + def gradients_X(self, dL_dK, X, X2=None): + #if self._X is None or X.base is not 
self._X.base or X2 is not None: + self._K_computations(X, X2) + gradients_X = np.zeros((X.shape[0], X.shape[1])) + for i, x in enumerate(self._sp_x): + gf = getattr(self, '_K_diff_' + x.name) + gradients_X[:, i] = (gf(**self._arguments)*dL_dK).sum(1) + if X2 is None: + gradients_X *= 2 + return gradients_X - def dKdiag_dX(self,partial,X,target): - self._generate_inline(self._code['dKdiag_dX'], X, target, Z, partial) - - def compute_psi_stats(self): - #define some normal distributions - mus = [sp.var('mu_%i'%i,real=True) for i in range(self.input_dim)] - Ss = [sp.var('S_%i'%i,positive=True) for i in range(self.input_dim)] - normals = [(2*sp.pi*Si)**(-0.5)*sp.exp(-0.5*(xi-mui)**2/Si) for xi, mui, Si in zip(self._sp_x, mus, Ss)] - - #do some integration! - #self._sp_psi0 = ?? - self._sp_psi1 = self._sp_k - for i in range(self.input_dim): - print 'perfoming integrals %i of %i'%(i+1,2*self.input_dim) - sys.stdout.flush() - self._sp_psi1 *= normals[i] - self._sp_psi1 = sp.integrate(self._sp_psi1,(self._sp_x[i],-sp.oo,sp.oo)) - clear_cache() - self._sp_psi1 = self._sp_psi1.simplify() - - #and here's psi2 (eek!) - zprime = [sp.Symbol('zp%i'%i) for i in range(self.input_dim)] - self._sp_psi2 = self._sp_k.copy()*self._sp_k.copy().subs(zip(self._sp_z,zprime)) - for i in range(self.input_dim): - print 'perfoming integrals %i of %i'%(self.input_dim+i+1,2*self.input_dim) - sys.stdout.flush() - self._sp_psi2 *= normals[i] - self._sp_psi2 = sp.integrate(self._sp_psi2,(self._sp_x[i],-sp.oo,sp.oo)) - clear_cache() - self._sp_psi2 = self._sp_psi2.simplify() - - def parameters_changed(self): - # Reset the caches - self._cache, self._cache2 = np.empty(shape=(2, 1)) - self._cache3, self._cache4, self._cache5 = np.empty(shape=(3, 1)) - - def update_gradients_full(self, dL_dK, X): - # Need to extract parameters to local variables first - self._K_computations(X, None) - for shared_params in self._sp_theta: - parameter = getattr(self, shared_params.name) - code = self._code['dK_d' + shared_params.name] - setattr(parameter, 'gradient', self._generate_inline(code, X, target=None, Z=None, partial=dL_dK)) - - for split_params in self._split_theta_names: - parameter = getattr(self, split_params.name) - code = self._code['dK_d' + split_params.name] - setattr(parameter, 'gradient', self._generate_inline(code, X, target=None, Z=None, partial=dL_dK)) + def gradients_X_diag(self, dL_dK, X): + self._K_computations(X) + dX = np.zeros_like(X) + for i, x in enumerate(self._sp_x): + gf = getattr(self, '_Kdiag_diff_' + x.name) + dX[:, i] = gf(**self._diag_arguments)*dL_dK + return dX + def update_gradients_full(self, dL_dK, X, X2=None): + # Need to extract parameters to local variables first + self._K_computations(X, X2) + for theta in self._sp_theta: + parameter = getattr(self, theta.name) + gf = getattr(self, '_K_diff_' + theta.name) + gradient = (gf(**self._arguments)*dL_dK).sum() + if X2 is not None: + gradient += (gf(**self._reverse_arguments)*dL_dK).sum() + setattr(parameter, 'gradient', gradient) + if self.output_dim>1: + for theta in self._sp_theta_i: + parameter = getattr(self, theta.name[:-2]) + gf = getattr(self, '_K_diff_' + theta.name) + A = gf(**self._arguments)*dL_dK + gradient = np.asarray([A[np.where(self._output_ind==i)].T.sum() + for i in np.arange(self.output_dim)]) + if X2 is None: + gradient *= 2 + else: + A = gf(**self._reverse_arguments)*dL_dK.T + gradient += np.asarray([A[np.where(self._output_ind2==i)].T.sum() + for i in np.arange(self.output_dim)]) + setattr(parameter, 'gradient', gradient) + - # def 
update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z): - # #contributions from Kdiag - # self.variance.gradient = np.sum(dL_dKdiag) - - # #from Knm - # self._K_computations(X, Z) - # self.variance.gradient += np.sum(dL_dKnm * self._K_dvar) - # if self.ARD: - # self.lengthscale.gradient = self._dL_dlengthscales_via_K(dL_dKnm, X, Z) - - # else: - # self.lengthscale.gradient = (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKnm) - - # #from Kmm - # self._K_computations(Z, None) - # self.variance.gradient += np.sum(dL_dKmm * self._K_dvar) - # if self.ARD: - # self.lengthscale.gradient += self._dL_dlengthscales_via_K(dL_dKmm, Z, None) - # else: - # self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dKmm) - - - #---------------------------------------# - # Precomputations # - #---------------------------------------# + def update_gradients_diag(self, dL_dKdiag, X): + self._K_computations(X) + for theta in self._sp_theta: + parameter = getattr(self, theta.name) + gf = getattr(self, '_Kdiag_diff_' + theta.name) + setattr(parameter, 'gradient', (gf(**self._diag_arguments)*dL_dKdiag).sum()) + if self.output_dim>1: + for theta in self._sp_theta_i: + parameter = getattr(self, theta.name[:-2]) + gf = getattr(self, '_Kdiag_diff_' + theta.name) + a = gf(**self._diag_arguments)*dL_dKdiag + setattr(parameter, 'gradient', + np.asarray([a[np.where(self._output_ind==i)].sum() + for i in np.arange(self.output_dim)])) + + def _K_computations(self, X, X2=None): + """Set up argument lists for the derivatives.""" + # Could check if this needs doing or not, there could + # definitely be some computational savings by checking for + # parameter updates here. + self._arguments = {} + self._diag_arguments = {} + for i, x in enumerate(self._sp_x): + self._arguments[x.name] = X[:, i][:, None] + self._diag_arguments[x.name] = X[:, i][:, None] + if self.output_dim > 1: + self._output_ind = np.asarray(X[:, -1], dtype='int') + for i, theta in enumerate(self._sp_theta_i): + self._arguments[theta.name] = np.asarray(getattr(self, theta.name[:-2])[self._output_ind])[:, None] + self._diag_arguments[theta.name] = self._arguments[theta.name] + for theta in self._sp_theta: + self._arguments[theta.name] = np.asarray(getattr(self, theta.name)) + self._diag_arguments[theta.name] = self._arguments[theta.name] - def _K_computations(self, X, Z): - if Z is None: - self._generate_inline(self._precompute, X) + if X2 is not None: + for i, z in enumerate(self._sp_z): + self._arguments[z.name] = X2[:, i][None, :] + if self.output_dim > 1: + self._output_ind2 = np.asarray(X2[:, -1], dtype='int') + for i, theta in enumerate(self._sp_theta_j): + self._arguments[theta.name] = np.asarray(getattr(self, theta.name[:-2])[self._output_ind2])[None, :] else: - self._generate_inline(self._precompute, X, Z=Z) + for z in self._sp_z: + self._arguments[z.name] = self._arguments['x_'+z.name[2:]].T + if self.output_dim > 1: + self._output_ind2 = self._output_ind + for theta in self._sp_theta_j: + self._arguments[theta.name] = self._arguments[theta.name[:-2] + '_i'].T + if X2 is not None: + # These arguments are needed in gradients when X2 is not equal to X. 
+ self._reverse_arguments = self._arguments.copy() # copy, so the x/z swaps below do not overwrite the forward arguments
+ for x, z in zip(self._sp_x, self._sp_z):
+ self._reverse_arguments[x.name] = self._arguments[z.name].T
+ self._reverse_arguments[z.name] = self._arguments[x.name].T
+ if self.output_dim > 1:
+ for theta_i, theta_j in zip(self._sp_theta_i, self._sp_theta_j):
+ self._reverse_arguments[theta_i.name] = self._arguments[theta_j.name].T
+ self._reverse_arguments[theta_j.name] = self._arguments[theta_i.name].T
+
diff --git a/GPy/kern/_src/ODE_1.py b/GPy/kern/_src/todo/ODE_1.py similarity index 100% rename from GPy/kern/_src/ODE_1.py rename to GPy/kern/_src/todo/ODE_1.py
diff --git a/GPy/kern/_src/eq_ode1.py b/GPy/kern/_src/todo/eq_ode1.py similarity index 100% rename from GPy/kern/_src/eq_ode1.py rename to GPy/kern/_src/todo/eq_ode1.py
diff --git a/GPy/kern/_src/finite_dimensional.py b/GPy/kern/_src/todo/finite_dimensional.py similarity index 100% rename from GPy/kern/_src/finite_dimensional.py rename to GPy/kern/_src/todo/finite_dimensional.py
diff --git a/GPy/kern/_src/fixed.py b/GPy/kern/_src/todo/fixed.py similarity index 100% rename from GPy/kern/_src/fixed.py rename to GPy/kern/_src/todo/fixed.py
diff --git a/GPy/kern/_src/gibbs.py b/GPy/kern/_src/todo/gibbs.py similarity index 100% rename from GPy/kern/_src/gibbs.py rename to GPy/kern/_src/todo/gibbs.py
diff --git a/GPy/kern/_src/hetero.py b/GPy/kern/_src/todo/hetero.py similarity index 100% rename from GPy/kern/_src/hetero.py rename to GPy/kern/_src/todo/hetero.py
diff --git a/GPy/kern/_src/odekern1.c b/GPy/kern/_src/todo/odekern1.c similarity index 100% rename from GPy/kern/_src/odekern1.c rename to GPy/kern/_src/todo/odekern1.c
diff --git a/GPy/kern/_src/poly.py b/GPy/kern/_src/todo/poly.py similarity index 100% rename from GPy/kern/_src/poly.py rename to GPy/kern/_src/todo/poly.py
diff --git a/GPy/kern/_src/rbf_inv.py b/GPy/kern/_src/todo/rbf_inv.py similarity index 100% rename from GPy/kern/_src/rbf_inv.py rename to GPy/kern/_src/todo/rbf_inv.py
diff --git a/GPy/kern/_src/spline.py b/GPy/kern/_src/todo/spline.py similarity index 100% rename from GPy/kern/_src/spline.py rename to GPy/kern/_src/todo/spline.py
diff --git a/GPy/kern/_src/symmetric.py b/GPy/kern/_src/todo/symmetric.py similarity index 100% rename from GPy/kern/_src/symmetric.py rename to GPy/kern/_src/todo/symmetric.py
diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py index 388ce173..10df906d 100644 --- a/GPy/likelihoods/bernoulli.py +++ b/GPy/likelihoods/bernoulli.py @@ -2,9 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np
-from scipy import stats, special
-import scipy as sp
-from GPy.util.univariate_Gaussian import std_norm_pdf, std_norm_cdf
+from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf
import link_functions from likelihood import Likelihood
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py index 3fcaffa8..83db4b8c 100644 --- a/GPy/models/__init__.py +++ b/GPy/models/__init__.py @@ -15,3 +15,4 @@ from mrd import MRD from gradient_checker import GradientChecker from gp_multioutput_regression import GPMultioutputRegression from sparse_gp_multioutput_regression import SparseGPMultioutputRegression
+from ss_gplvm import SSGPLVM
\ No newline at end of file
diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py index 8455c7a1..5fed767b 100644 --- a/GPy/models/bayesian_gplvm.py +++ b/GPy/models/bayesian_gplvm.py @@ -32,21 +32,23 @@ class BayesianGPLVM(SparseGP): if X_variance is None: X_variance =
np.random.uniform(0,.1,X.shape) + if Z is None: Z = np.random.permutation(X.copy())[:num_inducing] assert Z.shape[1] == X.shape[1] if kernel is None: kernel = kern.RBF(input_dim) # + kern.white(input_dim) - + if likelihood is None: likelihood = Gaussian() - #self.q = NormalPosterior(X, X_variance) + + self.variational_prior = NormalPrior() - - SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, X_variance, name, **kwargs) - self.add_parameter(self.q, index=0) - #self.ensure_default_constraints() + X = NormalPosterior(X, X_variance) + + SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, name, **kwargs) + self.add_parameter(self.X, index=0) def _getstate(self): """ @@ -62,16 +64,14 @@ class BayesianGPLVM(SparseGP): def parameters_changed(self): super(BayesianGPLVM, self).parameters_changed() - self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.q) - - # TODO: This has to go into kern - # maybe a update_gradients_q_variational? - self.q.mean.gradient, self.q.variance.gradient = self.kern.gradients_q_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict) - + self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X) + + self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict) + # update for the KL divergence - self.variational_prior.update_gradients_KL(self.q) - - + self.variational_prior.update_gradients_KL(self.X) + + def plot_latent(self, plot_inducing=True, *args, **kwargs): """ See GPy.plotting.matplot_dep.dim_reduction_plots.plot_latent @@ -150,14 +150,14 @@ class BayesianGPLVM(SparseGP): return dim_reduction_plots.plot_steepest_gradient_map(self,*args,**kwargs) class BayesianGPLVMWithMissingData(BayesianGPLVM): - def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10, + def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10, Z=None, kernel=None, inference_method=None, likelihood=None, name='bayesian gplvm', **kwargs): from ..util.subarray_and_sorting import common_subarrays self.subarrays = common_subarrays(Y) import ipdb;ipdb.set_trace() BayesianGPLVM.__init__(self, Y, input_dim, X=X, X_variance=X_variance, init=init, num_inducing=num_inducing, Z=Z, kernel=kernel, inference_method=inference_method, likelihood=likelihood, name=name, **kwargs) - - + + def parameters_changed(self): super(BayesianGPLVM, self).parameters_changed() self._log_marginal_likelihood -= self.KL_divergence() @@ -165,12 +165,12 @@ class BayesianGPLVMWithMissingData(BayesianGPLVM): dL_dmu, dL_dS = self.dL_dmuS() # dL: - self.q.mean.gradient = dL_dmu - self.q.variance.gradient = dL_dS + self.X.mean.gradient = dL_dmu + self.X.variance.gradient = dL_dS # dKL: - self.q.mean.gradient -= self.X - self.q.variance.gradient -= (1. - (1. / (self.X_variance))) * 0.5 + self.X.mean.gradient -= self.X.mean + self.X.variance.gradient -= (1. - (1. / (self.X.variance))) * 0.5 if __name__ == '__main__': import numpy as np @@ -178,7 +178,7 @@ if __name__ == '__main__': W = np.linspace(0,1,10)[None,:] Y = (X*W).sum(1) missing = np.random.binomial(1,.1,size=Y.shape) - + pass def latent_cost_and_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2): diff --git a/GPy/models/sparse_gp_regression.py b/GPy/models/sparse_gp_regression.py index 61defb7d..54c89a89 100644 --- a/GPy/models/sparse_gp_regression.py +++ b/GPy/models/sparse_gp_regression.py @@ -7,6 +7,7 @@ from ..core import SparseGP from .. 
import likelihoods from .. import kern from ..inference.latent_function_inference import VarDTC
+from ..util.misc import param_to_array
class SparseGPRegression(SparseGP): """ @@ -33,18 +34,18 @@ class SparseGPRegression(SparseGP): # kern defaults to rbf (plus white for stability) if kernel is None:
- kernel = kern.rbf(input_dim)# + kern.white(input_dim, variance=1e-3)
+ kernel = kern.RBF(input_dim)# + kern.white(input_dim, variance=1e-3)
# Z defaults to a subset of the data if Z is None: i = np.random.permutation(num_data)[:min(num_inducing, num_data)]
- Z = X[i].copy()
+ Z = param_to_array(X)[i].copy()
else: assert Z.shape[1] == input_dim likelihood = likelihoods.Gaussian()
- SparseGP.__init__(self, X, Y, Z, kernel, likelihood, X_variance=X_variance, inference_method=VarDTC())
+ SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method=VarDTC())
def _getstate(self): return SparseGP._getstate(self)
diff --git a/GPy/models/ss_gplvm.py b/GPy/models/ss_gplvm.py new file mode 100644 index 00000000..f21da605 --- /dev/null +++ b/GPy/models/ss_gplvm.py @@ -0,0 +1,301 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+import numpy as np
+import itertools
+from matplotlib import pyplot
+
+from ..core.sparse_gp import SparseGP
+from .. import kern
+from ..likelihoods import Gaussian
+from ..inference.optimization import SCG
+from ..util import linalg
+from ..core.parameterization.variational import SpikeAndSlabPrior, SpikeAndSlabPosterior
+
+class SSGPLVM(SparseGP):
+ """
+ Spike-and-Slab Gaussian Process Latent Variable Model
+
+ :param Y: observed data (np.ndarray) or GPy.likelihood
+ :type Y: np.ndarray | GPy.likelihood instance
+ :param input_dim: latent dimensionality
+ :type input_dim: int
+ :param init: initialisation method for the latent space
+ :type init: 'PCA'|'random'
+
+ """
+ def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
+ Z=None, kernel=None, inference_method=None, likelihood=None, name='Spike-and-Slab GPLVM', **kwargs):
+
+ if X is None: # The mean of the variational approximation (mu)
+ from ..util.initialization import initialize_latent
+ X = initialize_latent(init, input_dim, Y)
+ self.init = init
+
+ if X_variance is None: # The variance of the variational approximation (S)
+ X_variance = np.random.uniform(0,.1,X.shape)
+
+ gamma = np.empty_like(X) # The posterior probabilities of the binary variable in the variational approximation
+ gamma[:] = 0.5
+
+ if Z is None:
+ Z = np.random.permutation(X.copy())[:num_inducing]
+ assert Z.shape[1] == X.shape[1]
+
+ if likelihood is None:
+ likelihood = Gaussian()
+
+ if kernel is None:
+ kernel = kern.SSRBF(input_dim)
+
+ self.variational_prior = SpikeAndSlabPrior(pi=0.5) # the prior probability of the latent binary variable b
+ X = SpikeAndSlabPosterior(X, X_variance, gamma)
+
+ SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, name, **kwargs)
+ self.add_parameter(self.X, index=0)
+
+ def parameters_changed(self):
+ super(SSGPLVM, self).parameters_changed()
+ self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
+
+ self.X.mean.gradient, self.X.variance.gradient, self.X.binary_prob.gradient = self.kern.gradients_q_variational(posterior_variational=self.X, Z=self.Z, **self.grad_dict)
+
+ # update for the KL divergence
+ self.variational_prior.update_gradients_KL(self.X)
+
+ def plot_latent(self, plot_inducing=True, *args, **kwargs):
+ pass
+ #return
plot_latent.plot_latent(self, plot_inducing=plot_inducing, *args, **kwargs) + + def do_test_latents(self, Y): + """ + Compute the latent representation for a set of new points Y + + Notes: + This will only work with a univariate Gaussian likelihood (for now) + """ + assert not self.likelihood.is_heteroscedastic + N_test = Y.shape[0] + input_dim = self.Z.shape[1] + means = np.zeros((N_test, input_dim)) + covars = np.zeros((N_test, input_dim)) + + dpsi0 = -0.5 * self.output_dim * self.likelihood.precision + dpsi2 = self.dL_dpsi2[0][None, :, :] # TODO: this may change if we ignore het. likelihoods + V = self.likelihood.precision * Y + + #compute CPsi1V + if self.Cpsi1V is None: + psi1V = np.dot(self.psi1.T, self.likelihood.V) + tmp, _ = linalg.dtrtrs(self._Lm, np.asfortranarray(psi1V), lower=1, trans=0) + tmp, _ = linalg.dpotrs(self.LB, tmp, lower=1) + self.Cpsi1V, _ = linalg.dtrtrs(self._Lm, tmp, lower=1, trans=1) + + dpsi1 = np.dot(self.Cpsi1V, V.T) + + start = np.zeros(self.input_dim * 2) + + for n, dpsi1_n in enumerate(dpsi1.T[:, :, None]): + args = (self.kern, self.Z, dpsi0, dpsi1_n.T, dpsi2) + xopt, fopt, neval, status = SCG(f=latent_cost, gradf=latent_grad, x=start, optargs=args, display=False) + + mu, log_S = xopt.reshape(2, 1, -1) + means[n] = mu[0].copy() + covars[n] = np.exp(log_S[0]).copy() + + return means, covars + + def dmu_dX(self, Xnew): + """ + Calculate the gradient of the prediction at Xnew w.r.t Xnew. + """ + dmu_dX = np.zeros_like(Xnew) + for i in range(self.Z.shape[0]): + dmu_dX += self.kern.dK_dX(self.Cpsi1Vf[i:i + 1, :], Xnew, self.Z[i:i + 1, :]) + return dmu_dX + + def dmu_dXnew(self, Xnew): + """ + Individual gradient of prediction at Xnew w.r.t. each sample in Xnew + """ + dK_dX = np.zeros((Xnew.shape[0], self.num_inducing)) + ones = np.ones((1, 1)) + for i in range(self.Z.shape[0]): + dK_dX[:, i] = self.kern.dK_dX(ones, Xnew, self.Z[i:i + 1, :]).sum(-1) + return np.dot(dK_dX, self.Cpsi1Vf) + + def plot_steepest_gradient_map(self, fignum=None, ax=None, which_indices=None, labels=None, data_labels=None, data_marker='o', data_s=40, resolution=20, aspect='auto', updates=False, ** kwargs): + input_1, input_2 = significant_dims = most_significant_input_dimensions(self, which_indices) + + X = np.zeros((resolution ** 2, self.input_dim)) + indices = np.r_[:X.shape[0]] + if labels is None: + labels = range(self.output_dim) + + def plot_function(x): + X[:, significant_dims] = x + dmu_dX = self.dmu_dXnew(X) + argmax = np.argmax(dmu_dX, 1) + return dmu_dX[indices, argmax], np.array(labels)[argmax] + + if ax is None: + fig = pyplot.figure(num=fignum) + ax = fig.add_subplot(111) + + if data_labels is None: + data_labels = np.ones(self.num_data) + ulabels = [] + for lab in data_labels: + if not lab in ulabels: + ulabels.append(lab) + marker = itertools.cycle(list(data_marker)) + from GPy.util import Tango + for i, ul in enumerate(ulabels): + if type(ul) is np.string_: + this_label = ul + elif type(ul) is np.int64: + this_label = 'class %i' % ul + else: + this_label = 'class %i' % i + m = marker.next() + index = np.nonzero(data_labels == ul)[0] + x = self.X[index, input_1] + y = self.X[index, input_2] + ax.scatter(x, y, marker=m, s=data_s, color=Tango.nextMedium(), label=this_label) + + ax.set_xlabel('latent dimension %i' % input_1) + ax.set_ylabel('latent dimension %i' % input_2) + + from matplotlib.cm import get_cmap + from GPy.util.latent_space_visualizations.controllers.imshow_controller import ImAnnotateController + if not 'cmap' in kwargs.keys(): + 
kwargs.update(cmap=get_cmap('jet'))
+ controller = ImAnnotateController(ax,
+ plot_function,
+ tuple(self.X.min(0)[:, significant_dims]) + tuple(self.X.max(0)[:, significant_dims]),
+ resolution=resolution,
+ aspect=aspect,
+ **kwargs)
+ ax.legend()
+ ax.figure.tight_layout()
+ if updates:
+ pyplot.show()
+ clear = raw_input('Enter to continue')
+ if clear.lower() in 'yes' or clear == '':
+ controller.deactivate()
+ return controller.view
+
+ def plot_X_1d(self, fignum=None, ax=None, colors=None):
+ """
+ Plot latent space X in 1D:
+
+ - if fig is given, create input_dim subplots in fig and plot in these
+ - if ax is given plot input_dim 1D latent space plots of X into each `axis`
+ - if neither fig nor ax is given create a figure with fignum and plot in there
+
+ colors:
+ colors of different latent space dimensions input_dim
+
+ """
+ import pylab
+ if ax is None:
+ fig = pylab.figure(num=fignum, figsize=(8, min(12, (2 * self.X.shape[1]))))
+ if colors is None:
+ colors = pylab.gca()._get_lines.color_cycle
+ pylab.clf()
+ else:
+ colors = iter(colors)
+ plots = []
+ x = np.arange(self.X.shape[0])
+ for i in range(self.X.shape[1]):
+ if ax is None:
+ a = fig.add_subplot(self.X.shape[1], 1, i + 1)
+ elif isinstance(ax, (tuple, list)):
+ a = ax[i]
+ else:
+ raise ValueError("Need one ax per latent dimension (input_dim)")
+ a.plot(self.X, c='k', alpha=.3)
+ plots.extend(a.plot(x, self.X.T[i], c=colors.next(), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
+ a.fill_between(x,
+ self.X.T[i] - 2 * np.sqrt(self.X.variance.T[i]),
+ self.X.T[i] + 2 * np.sqrt(self.X.variance.T[i]),
+ facecolor=plots[-1].get_color(),
+ alpha=.3)
+ a.legend(borderaxespad=0.)
+ a.set_xlim(x.min(), x.max())
+ if i < self.X.shape[1] - 1:
+ a.set_xticklabels('')
+ pylab.draw()
+ if ax is None:
+ fig.tight_layout(h_pad=.01) # , rect=(0, 0, 1, .95))
+ return fig
+
+ def _getstate(self):
+ """
+ Get the current state of the class,
+ here just all the indices, rest can get recomputed
+ """
+ return SparseGP._getstate(self) + [self.init]
+
+ def _setstate(self, state):
+ self._const_jitter = None
+ self.init = state.pop()
+ SparseGP._setstate(self, state)
+
+
+def latent_cost_and_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
+ """
+ objective function for fitting the latent variables for test points
+ (negative log-likelihood: should be minimised!)
+ """
+ mu, log_S = mu_S.reshape(2, 1, -1)
+ S = np.exp(log_S)
+
+ psi0 = kern.psi0(Z, mu, S)
+ psi1 = kern.psi1(Z, mu, S)
+ psi2 = kern.psi2(Z, mu, S)
+
+ lik = dL_dpsi0 * psi0 + np.dot(dL_dpsi1.flatten(), psi1.flatten()) + np.dot(dL_dpsi2.flatten(), psi2.flatten()) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)
+
+ mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)
+ mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)
+ mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)
+
+ dmu = mu0 + mu1 + mu2 - mu
+ # dS = S0 + S1 + S2 -0.5 + .5/S
+ dlnS = S * (S0 + S1 + S2 - 0.5) + .5
+ return -lik, -np.hstack((dmu.flatten(), dlnS.flatten()))
+
+def latent_cost(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
+ """
+ objective function for fitting the latent variables (negative log-likelihood: should be minimised!)
+ This is the same as latent_cost_and_grad but only for the objective
+ """
+ mu, log_S = mu_S.reshape(2, 1, -1)
+ S = np.exp(log_S)
+
+ psi0 = kern.psi0(Z, mu, S)
+ psi1 = kern.psi1(Z, mu, S)
+ psi2 = kern.psi2(Z, mu, S)
+
+ lik = dL_dpsi0 * psi0 + np.dot(dL_dpsi1.flatten(), psi1.flatten()) + np.dot(dL_dpsi2.flatten(), psi2.flatten()) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)
+ return -float(lik)
+
+def latent_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
+ """
+ This is the same as latent_cost_and_grad but only for the grad
+ """
+ mu, log_S = mu_S.reshape(2, 1, -1)
+ S = np.exp(log_S)
+
+ mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)
+ mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)
+ mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)
+
+ dmu = mu0 + mu1 + mu2 - mu
+ # dS = S0 + S1 + S2 -0.5 + .5/S
+ dlnS = S * (S0 + S1 + S2 - 0.5) + .5
+
+ return -np.hstack((dmu.flatten(), dlnS.flatten()))
+
+
diff --git a/GPy/testing/bgplvm_tests.py b/GPy/testing/bgplvm_tests.py index 1192448a..fd55d314 100644 --- a/GPy/testing/bgplvm_tests.py +++ b/GPy/testing/bgplvm_tests.py @@ -10,11 +10,11 @@ class BGPLVMTests(unittest.TestCase): def test_bias_kern(self): N, num_inducing, input_dim, D = 10, 3, 2, 4 X = np.random.rand(N, input_dim)
- k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T Y -= Y.mean(axis=0)
- k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k = GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) m.randomize() self.assertTrue(m.checkgrad())
@@ -22,11 +22,11 @@ class BGPLVMTests(unittest.TestCase): def test_linear_kern(self): N, num_inducing, input_dim, D = 10, 3, 2, 4 X = np.random.rand(N, input_dim)
- k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T Y -= Y.mean(axis=0)
- k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k = GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) m.randomize() self.assertTrue(m.checkgrad())
@@ -34,11 +34,11 @@ class BGPLVMTests(unittest.TestCase): def test_rbf_kern(self): N, num_inducing, input_dim, D = 10, 3, 2, 4 X = np.random.rand(N, input_dim)
- k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T Y -= Y.mean(axis=0)
- k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) m.randomize() self.assertTrue(m.checkgrad())
@@ -46,11 +46,11 @@ class BGPLVMTests(unittest.TestCase): def test_rbf_bias_kern(self): N, num_inducing, input_dim, D = 10, 3, 2, 4 X = np.random.rand(N, input_dim)
- k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T Y -= Y.mean(axis=0)
- k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+ k =
GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) m.randomize() self.assertTrue(m.checkgrad()) @@ -58,11 +58,11 @@ class BGPLVMTests(unittest.TestCase): def test_rbf_line_kern(self): N, num_inducing, input_dim, D = 10, 3, 2, 4 X = np.random.rand(N, input_dim) - k = GPy.kern.rbf(input_dim) + GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.RBF(input_dim) + GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001) K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T Y -= Y.mean(axis=0) - k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) m.randomize() self.assertTrue(m.checkgrad()) @@ -70,11 +70,11 @@ class BGPLVMTests(unittest.TestCase): def test_linear_bias_kern(self): N, num_inducing, input_dim, D = 30, 5, 4, 30 X = np.random.rand(N, input_dim) - k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T Y -= Y.mean(axis=0) - k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) m.randomize() self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/gplvm_tests.py b/GPy/testing/gplvm_tests.py index 6223d833..a605a96c 100644 --- a/GPy/testing/gplvm_tests.py +++ b/GPy/testing/gplvm_tests.py @@ -9,10 +9,10 @@ class GPLVMTests(unittest.TestCase): def test_bias_kern(self): num_data, num_inducing, input_dim, output_dim = 10, 3, 2, 4 X = np.random.rand(num_data, input_dim) - k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001) K = k.K(X) Y = np.random.multivariate_normal(np.zeros(num_data),K,output_dim).T - k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) m = GPy.models.GPLVM(Y, input_dim, kernel = k) m.randomize() self.assertTrue(m.checkgrad()) @@ -20,10 +20,10 @@ class GPLVMTests(unittest.TestCase): def test_linear_kern(self): num_data, num_inducing, input_dim, output_dim = 10, 3, 2, 4 X = np.random.rand(num_data, input_dim) - k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001) K = k.K(X) Y = np.random.multivariate_normal(np.zeros(num_data),K,output_dim).T - k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001) m = GPy.models.GPLVM(Y, input_dim, kernel = k) m.randomize() self.assertTrue(m.checkgrad()) @@ -31,10 +31,10 @@ class GPLVMTests(unittest.TestCase): def test_rbf_kern(self): num_data, num_inducing, input_dim, output_dim = 10, 3, 2, 4 X = np.random.rand(num_data, input_dim) - k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001) K = k.K(X) Y = np.random.multivariate_normal(np.zeros(num_data),K,output_dim).T - k = 
GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001) + k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001) m = GPy.models.GPLVM(Y, input_dim, kernel = k) m.randomize() self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 40cd66dd..e5985145 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -4,6 +4,7 @@ import unittest import numpy as np import GPy +import sys verbose = True @@ -14,106 +15,223 @@ except ImportError: SYMPY_AVAILABLE=False -class KernelTests(unittest.TestCase): - def test_kerneltie(self): - K = GPy.kern.rbf(5, ARD=True) - K.tie_params('.*[01]') - K.constrain_fixed('2') - X = np.random.rand(5,5) - Y = np.ones((5,1)) - m = GPy.models.GPRegression(X,Y,K) - self.assertTrue(m.checkgrad()) +class Kern_check_model(GPy.core.Model): + """ + This is a dummy model class used as a base class for checking that the + gradients of a given kernel are implemented correctly. It enables + checkgrad() to be called independently on a kernel. + """ + def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): + GPy.core.Model.__init__(self, 'kernel_test_model') + if kernel==None: + kernel = GPy.kern.RBF(1) + if X is None: + X = np.random.randn(20, kernel.input_dim) + if dL_dK is None: + if X2 is None: + dL_dK = np.ones((X.shape[0], X.shape[0])) + else: + dL_dK = np.ones((X.shape[0], X2.shape[0])) - def test_rbfkernel(self): - kern = GPy.kern.rbf(5) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + self.kernel = kernel + self.X = GPy.core.parameterization.Param('X',X) + self.X2 = X2 + self.dL_dK = dL_dK - def test_rbf_sympykernel(self): - if SYMPY_AVAILABLE: - kern = GPy.kern.rbf_sympy(5) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def is_positive_definite(self): + v = np.linalg.eig(self.kernel.K(self.X))[0] + if any(v<-10*sys.float_info.epsilon): + return False + else: + return True - def test_eq_sympykernel(self): - if SYMPY_AVAILABLE: - kern = GPy.kern.eq_sympy(5, 3) - self.assertTrue(GPy.kern.kern_test(kern, output_ind=4, verbose=verbose)) + def log_likelihood(self): + return np.sum(self.dL_dK*self.kernel.K(self.X, self.X2)) - def test_ode1_eqkernel(self): - if SYMPY_AVAILABLE: - kern = GPy.kern.ode1_eq(3) - self.assertTrue(GPy.kern.kern_test(kern, output_ind=1, verbose=verbose, X_positive=True)) +class Kern_check_dK_dtheta(Kern_check_model): + """ + This class allows gradient checks for the gradient of a kernel with + respect to parameters. + """ + def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): + Kern_check_model.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=X2) + self.add_parameter(self.kernel) - def test_rbf_invkernel(self): - kern = GPy.kern.rbf_inv(5) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def parameters_changed(self): + return self.kernel.update_gradients_full(self.dL_dK, self.X, self.X2) - def test_Matern32kernel(self): - kern = GPy.kern.Matern32(5) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) - def test_Matern52kernel(self): - kern = GPy.kern.Matern52(5) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) +class Kern_check_dKdiag_dtheta(Kern_check_model): + """ + This class allows gradient checks of the gradient of the diagonal of a + kernel with respect to the parameters. 
+ """ + def __init__(self, kernel=None, dL_dK=None, X=None): + Kern_check_model.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=None) + self.add_parameter(self.kernel) - def test_linearkernel(self): - kern = GPy.kern.linear(5) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def parameters_changed(self): + self.kernel.update_gradients_diag(self.dL_dK, self.X) - def test_periodic_exponentialkernel(self): - kern = GPy.kern.periodic_exponential(1) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def log_likelihood(self): + return (np.diag(self.dL_dK)*self.kernel.Kdiag(self.X)).sum() - def test_periodic_Matern32kernel(self): - kern = GPy.kern.periodic_Matern32(1) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def parameters_changed(self): + return self.kernel.update_gradients_diag(np.diag(self.dL_dK), self.X) - def test_periodic_Matern52kernel(self): - kern = GPy.kern.periodic_Matern52(1) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) +class Kern_check_dK_dX(Kern_check_model): + """This class allows gradient checks for the gradient of a kernel with respect to X. """ + def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): + Kern_check_model.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=X2) + self.add_parameter(self.X) - def test_rational_quadratickernel(self): - kern = GPy.kern.rational_quadratic(1) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def parameters_changed(self): + self.X.gradient = self.kernel.gradients_X(self.dL_dK, self.X, self.X2) - def test_gibbskernel(self): - kern = GPy.kern.gibbs(5, mapping=GPy.mappings.Linear(5, 1)) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) +class Kern_check_dKdiag_dX(Kern_check_dK_dX): + """This class allows gradient checks for the gradient of a kernel diagonal with respect to X. """ + def __init__(self, kernel=None, dL_dK=None, X=None, X2=None): + Kern_check_dK_dX.__init__(self,kernel=kernel,dL_dK=dL_dK, X=X, X2=None) - def test_heterokernel(self): - kern = GPy.kern.hetero(5, mapping=GPy.mappings.Linear(5, 1), transform=GPy.core.transformations.logexp()) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def log_likelihood(self): + return (np.diag(self.dL_dK)*self.kernel.Kdiag(self.X)).sum() - def test_mlpkernel(self): - kern = GPy.kern.mlp(5) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + def parameters_changed(self): + self.X.gradient = self.kernel.gradients_X_diag(self.dL_dK, self.X) - def test_polykernel(self): - kern = GPy.kern.poly(5, degree=4) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) +def kern_test(kern, X=None, X2=None, output_ind=None, verbose=False): + """ + This function runs on kernels to check the correctness of their + implementation. It checks that the covariance function is positive definite + for a randomly generated data set. - def test_fixedkernel(self): - """ - Fixed effect kernel test - """ - X = np.random.rand(30, 4) - K = np.dot(X, X.T) - kernel = GPy.kern.fixed(4, K) - kern = GPy.kern.poly(5, degree=4) - self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose)) + :param kern: the kernel to be tested. + :type kern: GPy.kern.Kernpart + :param X: X input values to test the covariance function. + :type X: ndarray + :param X2: X2 input values to test the covariance function. 
+ :type X2: ndarray
- # def test_coregionalization(self): - # X1 = np.random.rand(50,1)*8 - # X2 = np.random.rand(30,1)*5 - # index = np.vstack((np.zeros_like(X1),np.ones_like(X2))) - # X = np.hstack((np.vstack((X1,X2)),index)) - # Y1 = np.sin(X1) + np.random.randn(*X1.shape)*0.05 - # Y2 = np.sin(X2) + np.random.randn(*X2.shape)*0.05 + 2. - # Y = np.vstack((Y1,Y2))
+ """
+ pass_checks = True
+ if X is None:
+ X = np.random.randn(10, kern.input_dim)
+ if output_ind is not None:
+ X[:, output_ind] = np.random.randint(kern.output_dim, size=X.shape[0])
+ if X2 is None:
+ X2 = np.random.randn(20, kern.input_dim)
+ if output_ind is not None:
+ X2[:, output_ind] = np.random.randint(kern.output_dim, size=X2.shape[0])
+
+ if verbose:
+ print("Checking covariance function is positive definite.")
+ result = Kern_check_model(kern, X=X).is_positive_definite()
+ if result and verbose:
+ print("Check passed.")
+ if not result:
+ print("Positive definite check failed for " + kern.name + " covariance function.")
+ pass_checks = False
+ return False
+
+ if verbose:
+ print("Checking gradients of K(X, X) wrt theta.")
+ result = Kern_check_dK_dtheta(kern, X=X, X2=None).checkgrad(verbose=verbose)
+ if result and verbose:
+ print("Check passed.")
+ if not result:
+ print("Gradient of K(X, X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:")
+ Kern_check_dK_dtheta(kern, X=X, X2=None).checkgrad(verbose=True)
+ pass_checks = False
+ return False
+
+ if verbose:
+ print("Checking gradients of K(X, X2) wrt theta.")
+ result = Kern_check_dK_dtheta(kern, X=X, X2=X2).checkgrad(verbose=verbose)
+ if result and verbose:
+ print("Check passed.")
+ if not result:
+ print("Gradient of K(X, X2) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:")
+ Kern_check_dK_dtheta(kern, X=X, X2=X2).checkgrad(verbose=True)
+ pass_checks = False
+ return False
+
+ if verbose:
+ print("Checking gradients of Kdiag(X) wrt theta.")
+ result = Kern_check_dKdiag_dtheta(kern, X=X).checkgrad(verbose=verbose)
+ if result and verbose:
+ print("Check passed.")
+ if not result:
+ print("Gradient of Kdiag(X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:")
+ Kern_check_dKdiag_dtheta(kern, X=X).checkgrad(verbose=True)
+ pass_checks = False
+ return False
+
+ if verbose:
+ print("Checking gradients of K(X, X) wrt X.")
+ try:
+ result = Kern_check_dK_dX(kern, X=X, X2=None).checkgrad(verbose=verbose)
+ except NotImplementedError:
+ result=True
+ if verbose:
+ print("gradients_X not implemented for " + kern.name)
+ if result and verbose:
+ print("Check passed.")
+ if not result:
+ print("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:")
+ Kern_check_dK_dX(kern, X=X, X2=None).checkgrad(verbose=True)
+ pass_checks = False
+ return False
+
+ if verbose:
+ print("Checking gradients of K(X, X2) wrt X.")
+ try:
+ result = Kern_check_dK_dX(kern, X=X, X2=X2).checkgrad(verbose=verbose)
+ except NotImplementedError:
+ result=True
+ if verbose:
+ print("gradients_X not implemented for " + kern.name)
+ if result and verbose:
+ print("Check passed.")
+ if not result:
+ print("Gradient of K(X, X2) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:")
+ Kern_check_dK_dX(kern, X=X, X2=X2).checkgrad(verbose=True)
+ pass_checks = False
+ return False
+
+ if verbose:
+ print("Checking gradients of Kdiag(X) wrt X.")
+ try:
+ result = Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=verbose)
+ except NotImplementedError:
+ result=True
+ if verbose:
+ print("gradients_X not implemented for " + kern.name)
+ if result and verbose:
+ print("Check passed.")
+ if not result:
+ print("Gradient of Kdiag(X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:")
+ Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=True)
+ pass_checks = False
+ return False
+
+ return pass_checks
+
+
+class KernelTestsContinuous(unittest.TestCase):
+ def setUp(self):
+ self.X = np.random.randn(100,2)
+ self.X2 = np.random.randn(110,2)
+
+ def test_Matern32(self):
+ k = GPy.kern.Matern32(2)
+ self.assertTrue(kern_test(k, X=self.X, X2=self.X2, verbose=verbose))
+
+ def test_Matern52(self):
+ k = GPy.kern.Matern52(2)
+ self.assertTrue(kern_test(k, X=self.X, X2=self.X2, verbose=verbose))
+
+ #TODO: turn off grad checking wrt X for indexed kernels like coregionalize
- # k1 = GPy.kern.rbf(1) + GPy.kern.bias(1) - # k2 = GPy.kern.coregionalize(2,1) - # kern = k1**k2 - # self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))
if __name__ == "__main__":
diff --git a/GPy/testing/bcgplvm_tests.py b/GPy/testing/old_tests/bcgplvm_tests.py similarity index 100% rename from GPy/testing/bcgplvm_tests.py rename to GPy/testing/old_tests/bcgplvm_tests.py
diff --git a/GPy/testing/cgd_tests.py b/GPy/testing/old_tests/cgd_tests.py similarity index 94% rename from GPy/testing/cgd_tests.py rename to GPy/testing/old_tests/cgd_tests.py index 82041c9f..c2653ea5 100644 --- a/GPy/testing/cgd_tests.py +++ b/GPy/testing/old_tests/cgd_tests.py @@ -5,10 +5,10 @@ Created on 26 Apr 2013 ''' import unittest import numpy
-from GPy.inference.conjugate_gradient_descent import CGD, RUNNING
+from GPy.inference.optimization.conjugate_gradient_descent import CGD, RUNNING
import pylab from scipy.optimize.optimize import rosen, rosen_der
-from GPy.inference.gradient_descent_update_rules import PolakRibiere
+from GPy.inference.optimization.gradient_descent_update_rules import PolakRibiere
class Test(unittest.TestCase): @@ -30,7 +30,7 @@ class Test(unittest.TestCase): assert numpy.allclose(res[0], 0, atol=1e-5) break except AssertionError:
- import ipdb;ipdb.set_trace()
+ import pdb;pdb.set_trace()
# RESTART pass else: @@ -108,4 +108,3 @@ if __name__ == "__main__": res[0] = opt.opt(f, df, x0.copy(), callback, messages=True, maxiter=1000, report_every=7, gtol=1e-12, update_rule=PolakRibiere)
- pass
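For orientation, this is roughly how the new checkgrad-based harness above is meant to be driven; the import path follows the kernel_tests.py module edited in this patch, but treat it as a sketch rather than a documented public API:

import numpy as np
import GPy
from GPy.testing.kernel_tests import kern_test  # module-level function added above

k = GPy.kern.RBF(2)
X = np.random.randn(30, 2)
X2 = np.random.randn(40, 2)
# Runs the positive-definiteness check plus every gradient check (theta, X, diagonal).
assert kern_test(k, X=X, X2=X2, verbose=True)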
diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 22b4f86c..f2b372be 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -62,41 +62,41 @@ def force_F_ordered(A): print "why are your arrays not F order?" return np.asfortranarray(A)
+# def jitchol(A, maxtries=5):
+# A = force_F_ordered_symmetric(A)
+# L, info = lapack.dpotrf(A, lower=1)
+# if info == 0:
+# return L
+# else:
+# if maxtries==0:
+# raise linalg.LinAlgError, "not positive definite, even with jitter."
+# diagA = np.diag(A)
+# if np.any(diagA <= 0.):
+# raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
+# jitter = diagA.mean() * 1e-6
+
+# return jitchol(A+np.eye(A.shape[0])*jitter, maxtries-1)
+
def jitchol(A, maxtries=5):
- A = force_F_ordered_symmetric(A)
- L, info = lapack.dpotrf(A, lower=1)
- if info == 0:
- return L
- else:
- if maxtries==0:
- raise linalg.LinAlgError, "not positive definite, even with jitter."
- diagA = np.diag(A)
- if np.any(diagA <= 0.):
- raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
- jitter = diagA.mean() * 1e-6
+ A = np.ascontiguousarray(A)
+ L, info = lapack.dpotrf(A, lower=1)
+ if info == 0:
+ return L
+ else:
+ diagA = np.diag(A)
+ if np.any(diagA <= 0.):
+ raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
+ jitter = diagA.mean() * 1e-6
+ while maxtries > 0 and np.isfinite(jitter):
+ print 'Warning: adding jitter of {:.10e}'.format(jitter)
+ try:
+ return linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
+ except linalg.LinAlgError:
+ jitter *= 10
+ finally:
+ maxtries -= 1
+ raise linalg.LinAlgError, "not positive definite, even with jitter."
- return jitchol(A+np.eye(A.shape[0])*jitter, maxtries-1)
-
-#def jitchol(A, maxtries=5):
-# A = np.ascontiguousarray(A)
-# L, info = lapack.dpotrf(A, lower=1)
-# if info == 0:
-# return L
-# else:
-# diagA = np.diag(A)
-# if np.any(diagA <= 0.):
-# raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
-# jitter = diagA.mean() * 1e-6
-# while maxtries > 0 and np.isfinite(jitter):
-# print 'Warning: adding jitter of {:.10e}'.format(jitter)
-# try:
-# return linalg.cholesky(A + np.eye(A.shape[0]).T * jitter, lower=True)
-# except:
-# jitter *= 10
-# finally:
-# maxtries -= 1
-# raise linalg.LinAlgError, "not positive definite, even with jitter."
-#
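The rewritten jitchol above retries with geometrically growing jitter instead of recursing. A self-contained sketch of that retry loop, using scipy directly; the constants mirror the function above, while the rank-deficient demo matrix is made up:

import numpy as np
from scipy import linalg

A = np.ones((3, 3))  # rank one, so a plain Cholesky factorisation fails
try:
    L = linalg.cholesky(A, lower=True)
except linalg.LinAlgError:
    jitter = np.diag(A).mean() * 1e-6  # starting jitter, as in jitchol
    maxtries, L = 5, None
    while maxtries > 0 and np.isfinite(jitter):
        try:
            L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
            break
        except linalg.LinAlgError:
            jitter *= 10  # escalate tenfold per failed attempt, as in jitchol
        finally:
            maxtries -= 1

# The factor reproduces A up to the tiny diagonal jitter that was added.
assert L is not None and np.allclose(L.dot(L.T), A, atol=1e-3)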