diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index a0e60bcc..847cd99d 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -96,7 +96,7 @@ class GP(GPBase):
         model for a new variable Y* = v_tilde/tau_tilde, with a covariance
         matrix K* = K + diag(1./tau_tilde) plus a normalization term.
         """
-        return -0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
+        return -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
 
     def _log_likelihood_gradients(self):
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 810098fe..ac39fd66 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -60,6 +60,28 @@ def GPLVM_oil_100(optimize=True):
     m.plot_latent(labels=m.data_labels)
     return m
 
+def sparseGPLVM_oil(optimize=True, N=100, Q=6, num_inducing=15, max_iters=50):
+    np.random.seed(0)
+    data = GPy.util.datasets.oil()
+
+    Y = data['X'][:N]
+    Y = Y - Y.mean(0)
+    Y /= Y.std(0)
+
+    # create simple GP model
+    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q)
+    m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing)
+    m.data_labels = data['Y'].argmax(axis=1)
+
+    # optimize
+    if optimize:
+        m.optimize('scg', messages=1, max_iters=max_iters)
+
+    # plot
+    print(m)
+    #m.plot_latent(labels=m.data_labels)
+    return m
+
 def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False):
     from GPy.util.datasets import swiss_roll_generated
     from GPy.core.transformations import logexp_clipped
@@ -114,7 +136,7 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
         m.optimize('scg', messages=1)
     return m
 
-def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=50, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=50, plot=False, **k):
     np.random.seed(0)
     data = GPy.util.datasets.oil()
 
@@ -135,9 +157,9 @@ def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=50, plot=
     # optimize
     if optimize:
         m.constrain_fixed('noise')
-        m.optimize('scg', messages=1, max_f_eval=100, gtol=.05)
+        m.optimize('scg', messages=1, max_iters=100, gtol=.05)
         m.constrain_positive('noise')
-        m.optimize('scg', messages=1, max_f_eval=max_f_eval, gtol=.05)
+        m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)
 
     if plot:
         y = m.likelihood.Y[0, :]
@@ -241,7 +263,7 @@ def bgplvm_simulation_matlab_compare():
 
 def bgplvm_simulation(optimize='scg',
                       plot=True,
-                      max_f_eval=2e4):
+                      max_iters=2e4):
     # from GPy.core.transformations import logexp_clipped
     D1, D2, D3, N, num_inducing, Q = 15, 8, 8, 100, 3, 5
     slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot)
@@ -262,8 +284,7 @@ def bgplvm_simulation(optimize='scg',
 
     if optimize:
         print "Optimizing model:"
-        m.optimize(optimize, max_iters=max_f_eval,
-                   max_f_eval=max_f_eval,
+        m.optimize(optimize, max_iters=max_iters,
                    messages=True, gtol=.05)
     if plot:
         m.plot_X_1d("BGPLVM Latent Space 1D")
diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py
index 452167ce..cc92c543 100644
--- a/GPy/examples/regression.py
+++ b/GPy/examples/regression.py
@@ -57,6 +57,79 @@ def toy_rbf_1d_50(optim_iters=100):
     print(m)
     return m
 
+def toy_ARD(optim_iters=1000, kernel_type='linear', N=300, D=4):
+    # Create an artificial dataset where the values in the targets (Y)
+    # only depend on dimensions 1 and 3 of the inputs (X). Run ARD to
+    # see if this dependency can be recovered
+    X1 = np.sin(np.sort(np.random.rand(N,1)*10,0))
+    X2 = np.cos(np.sort(np.random.rand(N,1)*10,0))
+    X3 = np.exp(np.sort(np.random.rand(N,1),0))
+    X4 = np.log(np.sort(np.random.rand(N,1),0))
+    X = np.hstack((X1, X2, X3, X4))
+
+    Y1 = np.asarray(2*X[:,0]+3)[:,None]
+    Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0]))[:,None]
+    Y = np.hstack((Y1, Y2))
+
+    Y = np.dot(Y, np.random.rand(2,D))
+    Y = Y + 0.2*np.random.randn(Y.shape[0], Y.shape[1])
+    Y -= Y.mean()
+    Y /= Y.std()
+
+    if kernel_type == 'linear':
+        kernel = GPy.kern.linear(X.shape[1], ARD=1)
+    elif kernel_type == 'rbf_inv':
+        kernel = GPy.kern.rbf_inv(X.shape[1], ARD=1)
+    else:
+        kernel = GPy.kern.rbf(X.shape[1], ARD=1)
+    kernel += GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
+    m = GPy.models.GPRegression(X, Y, kernel)
+    #len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
+    #m.set_prior('.*lengthscale',len_prior)
+
+    m.optimize(optimizer='scg', max_iters=optim_iters, messages=1)
+
+    m.kern.plot_ARD()
+    print(m)
+    return m
+
+def toy_ARD_sparse(optim_iters=1000, kernel_type='linear', N=300, D=4):
+    # Create an artificial dataset where the values in the targets (Y)
+    # only depend on dimensions 1 and 3 of the inputs (X). Run ARD to
+    # see if this dependency can be recovered
+    X1 = np.sin(np.sort(np.random.rand(N,1)*10,0))
+    X2 = np.cos(np.sort(np.random.rand(N,1)*10,0))
+    X3 = np.exp(np.sort(np.random.rand(N,1),0))
+    X4 = np.log(np.sort(np.random.rand(N,1),0))
+    X = np.hstack((X1, X2, X3, X4))
+
+    Y1 = np.asarray(2*X[:,0]+3)[:,None]
+    Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0]))[:,None]
+    Y = np.hstack((Y1, Y2))
+
+    Y = np.dot(Y, np.random.rand(2,D))
+    Y = Y + 0.2*np.random.randn(Y.shape[0], Y.shape[1])
+    Y -= Y.mean()
+    Y /= Y.std()
+
+    if kernel_type == 'linear':
+        kernel = GPy.kern.linear(X.shape[1], ARD=1)
+    elif kernel_type == 'rbf_inv':
+        kernel = GPy.kern.rbf_inv(X.shape[1], ARD=1)
+    else:
+        kernel = GPy.kern.rbf(X.shape[1], ARD=1)
+    kernel += GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
+    X_variance = np.ones(X.shape)*0.5
+    m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
+    #len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
+    #m.set_prior('.*lengthscale',len_prior)
+
+    m.optimize(optimizer='scg', max_iters=optim_iters, messages=1)
+
+    m.kern.plot_ARD()
+    print(m)
+    return m
+
 def silhouette(optim_iters=100):
     """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
     data = GPy.util.datasets.silhouette()
diff --git a/GPy/inference/optimization.py b/GPy/inference/optimization.py
index 433d5f41..0ef487af 100644
--- a/GPy/inference/optimization.py
+++ b/GPy/inference/optimization.py
@@ -4,6 +4,7 @@
 import pylab as pb
 import datetime as dt
 from scipy import optimize
+from warnings import warn
 
 try:
     import rasmussens_minimize as rasm
@@ -198,17 +199,22 @@ class opt_rasm(Optimizer):
 
 class opt_SCG(Optimizer):
     def __init__(self, *args, **kwargs):
+        if 'max_f_eval' in kwargs:
+            warn("max_f_eval deprecated for SCG optimizer: use max_iters instead!\nIgnoring max_f_eval!", FutureWarning)
         Optimizer.__init__(self, *args, **kwargs)
+        self.opt_name = "Scaled Conjugate Gradients"
 
     def opt(self, f_fp=None, f=None, fp=None):
         assert not f is None
         assert not fp is None
+
         opt_result = SCG(f, fp, self.x_init, display=self.messages,
                          maxiters=self.max_iters,
                          max_f_eval=self.max_f_eval, xtol=self.xtol,
                          ftol=self.ftol, gtol=self.gtol)
+
         self.x_opt = opt_result[0]
         self.trace = opt_result[1]
         self.f_opt = self.trace[-1]
diff --git a/GPy/inference/scg.py b/GPy/inference/scg.py
index ba72bf60..7c8dda8d 100644
--- a/GPy/inference/scg.py
+++ b/GPy/inference/scg.py
@@ -35,7 +35,7 @@ def exponents(fnow, current_grad):
     exps = [np.abs(fnow), current_grad]
     return np.sign(exps) * np.log10(exps).astype(int)
 
-def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xtol=None, ftol=None, gtol=None):
+def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True, xtol=None, ftol=None, gtol=None):
     """
     Optimisation through Scaled Conjugate Gradients (SCG)
 
@@ -68,7 +68,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
     nsuccess = 0 # nsuccess counts number of successes.
     beta = 1.0 # Initial scale parameter.
     betamin = 1.0e-60 # Lower bound on scale.
-    betamax = 1.0e100 # Upper bound on scale.
+    betamax = 1.0e50 # Upper bound on scale.
status = "Not converged" flog = [fold] @@ -109,9 +109,9 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto fnew = f(xnew, *optargs) function_eval += 1 - if function_eval >= max_f_eval: - status = "maximum number of function evaluations exceeded" - break +# if function_eval >= max_f_eval: +# status = "maximum number of function evaluations exceeded" +# break # return x, flog, function_eval, status Delta = 2.*(fnew - fold) / (alpha * mu) @@ -131,13 +131,12 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto if display: print_out(len_maxiters, fnow, current_grad, beta, iteration) n_exps = exponents(fnow, current_grad) - if iteration - p_iter >= 6: + if iteration - p_iter >= 20 * np.random.rand(): a = iteration >= p_iter * 2.78 b = np.any(n_exps < exps) if a or b: - print '' - if a: p_iter = iteration + print '' if b: exps = n_exps @@ -184,7 +183,6 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto status = "maxiter exceeded" if display: - print "" print_out(len_maxiters, fnow, current_grad, beta, iteration) print "" print status diff --git a/GPy/kern/constructors.py b/GPy/kern/constructors.py index 5ae4e419..b549e139 100644 --- a/GPy/kern/constructors.py +++ b/GPy/kern/constructors.py @@ -5,6 +5,23 @@ import numpy as np from kern import kern import parts + +def rbf_inv(input_dim,variance=1., inv_lengthscale=None,ARD=False): + """ + Construct an RBF kernel + + :param input_dim: dimensionality of the kernel, obligatory + :type input_dim: int + :param variance: the variance of the kernel + :type variance: float + :param lengthscale: the lengthscale of the kernel + :type lengthscale: float + :param ARD: Auto Relevance Determination (one lengthscale per dimension) + :type ARD: Boolean + """ + part = parts.rbf_inv.RBFInv(input_dim,variance,inv_lengthscale,ARD) + return kern(input_dim, [part]) + def rbf(input_dim,variance=1., lengthscale=None,ARD=False): """ Construct an RBF kernel @@ -306,4 +323,4 @@ def hierarchical(k): # for sl in k.input_slices: # assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! 
(TODO)" _parts = [parts.hierarchical.Hierarchical(k.parts)] - return kern(k.input_dim+1,_parts) + return kern(k.input_dim+len(k.parts),_parts) diff --git a/GPy/kern/parts/__init__.py b/GPy/kern/parts/__init__.py index f9b40888..e39b70c2 100644 --- a/GPy/kern/parts/__init__.py +++ b/GPy/kern/parts/__init__.py @@ -20,3 +20,4 @@ import spline import symmetric import white import hierarchical +import rbf_inv diff --git a/GPy/kern/parts/hierarchical.py b/GPy/kern/parts/hierarchical.py index db495ba7..ab96fdd7 100644 --- a/GPy/kern/parts/hierarchical.py +++ b/GPy/kern/parts/hierarchical.py @@ -24,26 +24,26 @@ class Hierarchical(Kernpart): return np.hstack([k._get_params() for k in self.parts]) def _set_params(self,x): - [k._set_params(x[start:stop]) for start, stop in zip(self.param_starts, self.param_stops)] + [k._set_params(x[start:stop]) for k, start, stop in zip(self.parts, self.param_starts, self.param_stops)] def _get_param_names(self): - return self.k._get_param_names() + return sum([[str(i)+'_'+k.name+'_'+n for n in k._get_param_names()] for i,k in enumerate(self.parts)],[]) def _sort_slices(self,X,X2): - slices = [index_to_slices(x) for x in X[-self.levels:].T] - X = X[:-self.levels] + slices = [index_to_slices(x) for x in X[:,-self.levels:].T] + X = X[:,:-self.levels] if X2 is None: slices2 = slices X2 = X else: - slices2 = [index_to_slices(x) for x in X2[-self.levels:].T] - X2 = X2[:-self.levels] + slices2 = [index_to_slices(x) for x in X2[:,-self.levels:].T] + X2 = X2[:,:-self.levels] return X, X2, slices, slices2 def K(self,X,X2,target): X, X2, slices, slices2 = self._sort_slices(X,X2) - [[[k.K(X[s],X2[s2],target[s,s2]) for s in slices_i] for s2 in slices_j] for k,slices_i,slices_j in zip(self.parts,slices,slices2)] + [[[[k.K(X[s],X2[s2],target[s,s2]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices_,slices2_)] for k, slices_, slices2_ in zip(self.parts,slices,slices2)] def Kdiag(self,X,target): raise NotImplementedError @@ -51,7 +51,8 @@ class Hierarchical(Kernpart): #[[self.k.Kdiag(X[s],target[s]) for s in slices_i] for slices_i in slices] def dK_dtheta(self,dL_dK,X,X2,target): - [[[k.dK_dtheta(dL_dK[s,s2],X[s],X2[s2],target[p_start:p_stop]) for s in slices_i] for s2 in slices_j] for k,slices_i,slices_j, p_start, p_stop in zip(self.parts, slices, slices2, self.param_starts, self.param_stops)] + X, X2, slices, slices2 = self._sort_slices(X,X2) + [[[[k.dK_dtheta(dL_dK[s,s2],X[s],X2[s2],target[p_start:p_stop]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices_, slices2_)] for k, p_start, p_stop, slices_, slices2_ in zip(self.parts, self.param_starts, self.param_stops, slices, slices2)] def dK_dX(self,dL_dK,X,X2,target): diff --git a/GPy/kern/parts/rbf_inv.py b/GPy/kern/parts/rbf_inv.py new file mode 100644 index 00000000..52e93968 --- /dev/null +++ b/GPy/kern/parts/rbf_inv.py @@ -0,0 +1,335 @@ +# Copyright (c) 2012, GPy authors (see AUTHORS.txt). +# Licensed under the BSD 3-clause license (see LICENSE.txt) + + +from kernpart import Kernpart +import numpy as np +import hashlib +from scipy import weave +from ...util.linalg import tdot + +class RBFInv(Kernpart): + """ + Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel: + + .. math:: + + k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) \ \ \ \ \ \\text{ where } r^2 = \sum_{i=1}^d \\frac{ (x_i-x^\prime_i)^2}{\ell_i^2} + + where \ell_i is the lengthscale, \sigma^2 the variance and d the dimensionality of the input. 
+
+    :param input_dim: the number of input dimensions
+    :type input_dim: int
+    :param variance: the variance of the kernel
+    :type variance: float
+    :param inv_lengthscale: the vector of inverse lengthscales of the kernel
+    :type inv_lengthscale: array or list of the appropriate size (or float if there is only one inverse lengthscale parameter)
+    :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single inverse lengthscale parameter), otherwise there is one inverse lengthscale parameter per dimension.
+    :type ARD: Boolean
+    :rtype: kernel object
+
+    .. Note: this object implements both the ARD and 'spherical' version of the function
+    """
+
+    def __init__(self, input_dim, variance=1., inv_lengthscale=None, ARD=False):
+        self.input_dim = input_dim
+        self.name = 'rbf'
+        self.ARD = ARD
+        if not ARD:
+            self.num_params = 2
+            if inv_lengthscale is not None:
+                inv_lengthscale = np.asarray(inv_lengthscale)
+                assert inv_lengthscale.size == 1, "Only one inverse lengthscale needed for non-ARD kernel"
+            else:
+                inv_lengthscale = np.ones(1)
+        else:
+            self.num_params = self.input_dim + 1
+            if inv_lengthscale is not None:
+                inv_lengthscale = np.asarray(inv_lengthscale)
+                assert inv_lengthscale.size == self.input_dim, "bad number of inverse lengthscales"
+            else:
+                inv_lengthscale = np.ones(self.input_dim)
+
+        self._set_params(np.hstack((variance, inv_lengthscale.flatten())))
+
+        # initialize cache
+        self._Z, self._mu, self._S = np.empty(shape=(3, 1))
+        self._X, self._X2, self._params = np.empty(shape=(3, 1))
+
+        # a set of optional args to pass to weave
+        self.weave_options = {'headers'            : ['<omp.h>'],
+                              'extra_compile_args': ['-fopenmp -O3'], # -march=native'],
+                              'extra_link_args'    : ['-lgomp']}
+
+    def _get_params(self):
+        return np.hstack((self.variance, self.inv_lengthscale))
+
+    def _set_params(self, x):
+        assert x.size == (self.num_params)
+        self.variance = x[0]
+        self.inv_lengthscale = x[1:]
+        self.lengthscale = 1./self.inv_lengthscale
+        self.lengthscale2 = np.square(self.lengthscale)
+        # reset cached results
+        self._X, self._X2, self._params = np.empty(shape=(3, 1))
+        self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S
+
+    def _get_param_names(self):
+        if self.num_params == 2:
+            return ['variance', 'inv_lengthscale']
+        else:
+            return ['variance'] + ['inv_lengthscale_%i' % i for i in range(self.inv_lengthscale.size)]
+
+    def K(self, X, X2, target):
+        self._K_computations(X, X2)
+        target += self.variance * self._K_dvar
+
+    def Kdiag(self, X, target):
+        np.add(target, self.variance, target)
+
+    def dK_dtheta(self, dL_dK, X, X2, target):
+        self._K_computations(X, X2)
+        target[0] += np.sum(self._K_dvar * dL_dK)
+        if self.ARD:
+            dvardLdK = self._K_dvar * dL_dK
+            var_len3 = self.variance / np.power(self.lengthscale, 3)
+            len2 = self.lengthscale2
+            if X2 is None:
+                # save computation for the symmetrical case
+                dvardLdK = dvardLdK + dvardLdK.T
+                code = """
+                int q,i,j;
+                double tmp;
+                for(q=0; q
+
+        #include <omp.h>
+        """
+        weave.inline(code, support_code=support_code, libraries=['gomp'],
+                     arg_names=['N','num_inducing','input_dim','mu','Zhat','mudist_sq','mudist','lengthscale2','_psi2_denom','psi2_Zdist_sq','psi2_exponent','half_log_psi2_denom','psi2','variance_sq'],
+                     type_converters=weave.converters.blitz, **self.weave_options)
+
+        return mudist, mudist_sq, psi2_exponent, psi2
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index 885372a1..ea78a011 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -8,6 +8,7 @@ from svigp_regression import SVIGPRegression
 from sparse_gp_classification import SparseGPClassification
 from fitc_classification import FITCClassification
 from gplvm import GPLVM
+from sparse_gplvm import SparseGPLVM
 from warped_gp import WarpedGP
 from bayesian_gplvm import BayesianGPLVM
 from mrd import MRD
diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py
index 305ad120..2e81b370 100644
--- a/GPy/models/gplvm.py
+++ b/GPy/models/gplvm.py
@@ -41,6 +41,12 @@ class GPLVM(GP):
         else:
             return np.random.randn(Y.shape[0], input_dim)
 
+    def getstate(self):
+        return GP.getstate(self)
+
+    def setstate(self, state):
+        GP.setstate(self, state)
+
     def _get_param_names(self):
         return sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], []) + GP._get_param_names(self)
 
diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py
index 327c198f..228494aa 100644
--- a/GPy/models/mrd.py
+++ b/GPy/models/mrd.py
@@ -18,29 +18,25 @@ class MRD(Model):
 
     All Ys in likelihood_list are in [N x Dn], where Dn can be different per Yn,
     N must be shared across datasets though.
 
-    :param likelihood_list...: likelihoods of observed datasets
-    :type likelihood_list: [GPy.likelihood] | [Y1..Yy]
+    :param likelihood_list: list of observed datasets (wrapped in :py:class:`~GPy.likelihoods.gaussian.Gaussian` if not supplied as likelihoods directly)
+    :type likelihood_list: [:py:class:`~GPy.likelihoods.likelihood.likelihood` | :py:class:`ndarray`]
     :param names: names for different gplvm models
     :type names: [str]
-    :param input_dim: latent dimensionality (will raise
+    :param input_dim: latent dimensionality
     :type input_dim: int
-    :param initx: initialisation method for the latent space
-    :type initx: 'PCA'|'random'
+    :param initx: initialisation method for the latent space:
+
+        * 'concat' - PCA on the concatenation of all datasets
+        * 'single' - PCA on each dataset separately, then concatenated
+        * 'random' - random draw from a standard normal
+
+    :type initx: ['concat'|'single'|'random']
     :param initz: initialisation method for inducing inputs
     :type initz: 'permute'|'random'
-    :param X:
-        Initial latent space
-    :param X_variance:
-        Initial latent space variance
-    :param init: [cooncat|single|random]
-        initialization method to use:
-        *concat: PCA on concatenated outputs
-        *single: PCA on each output
-        *random: random
-    :param num_inducing:
-        number of inducing inputs to use
-    :param Z:
-        initial inducing inputs
+    :param X: initial latent space
+    :param X_variance: initial latent space variance
+    :param Z: initial inducing inputs
+    :param num_inducing: number of inducing inputs to use
     :param kernels: list of kernels or kernel shared for all BGPLVMS
     :type kernels: [GPy.kern.kern] | GPy.kern.kern | None (default)
     """
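
A minimal sketch for exercising the additions in this patch (it assumes only the APIs introduced above; the random Y is a hypothetical stand-in for the oil data the examples use):

    import numpy as np
    import GPy

    # ARD regression with the new inverse-lengthscale RBF kernel
    m = GPy.examples.regression.toy_ARD(optim_iters=200, kernel_type='rbf_inv')

    # the new kernel constructor and the new sparse GPLVM, used directly
    np.random.seed(0)
    Y = np.random.randn(100, 12)
    kernel = GPy.kern.rbf_inv(6, ARD=True) + GPy.kern.bias(6)
    m = GPy.models.SparseGPLVM(Y, 6, kernel=kernel, num_inducing=15)
    m.optimize('scg', messages=1, max_iters=50)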