diff --git a/GPy/core/GP.py b/GPy/core/GP.py
new file mode 100644
index 00000000..856c8a5c
--- /dev/null
+++ b/GPy/core/GP.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+
+import numpy as np
+from scipy import linalg
+import pylab as pb
+from .. import kern
+from ..util.linalg import pdinv, mdot, tdot
+#from ..util.plot import gpplot, Tango
+from ..likelihoods import EP
+from gp_base import GPBase
+
+class GP(GPBase):
+    """
+    Gaussian Process model for regression and EP
+
+    :param X: input observations
+    :param kernel: a GPy kernel, defaults to rbf+white
+    :param likelihood: a GPy likelihood
+    :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
+    :type normalize_X: False|True
+    :rtype: model object
+    :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1
+    :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
+    :type powerep: list
+
+    .. Note:: Multiple independent outputs are allowed using columns of Y
+
+    """
+    def __init__(self, X, likelihood, kernel, normalize_X=False):
+        super(GP, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        self._set_params(self._get_params())
+
+    def _set_params(self, p):
+        self.kern._set_params_transformed(p[:self.kern.Nparam_transformed()])
+        self.likelihood._set_params(p[self.kern.Nparam_transformed():])
+
+        self.K = self.kern.K(self.X)
+        self.K += self.likelihood.covariance_matrix
+
+        self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
+
+        # the gradient of the likelihood wrt the covariance matrix
+        if self.likelihood.YYT is None:
+            #alpha = np.dot(self.Ki, self.likelihood.Y)
+            alpha, _ = linalg.lapack.flapack.dpotrs(self.L, self.likelihood.Y, lower=1)
+            self.dL_dK = 0.5 * (tdot(alpha) - self.D * self.Ki)
+        else:
+            #tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
+            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
+            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
+            self.dL_dK = 0.5 * (tmp - self.D * self.Ki)
+
+    def _get_param_names(self):
+        return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
+
+    def update_likelihood_approximation(self):
+        """
+        Approximates a non-Gaussian likelihood using Expectation Propagation.
+
+        For a Gaussian likelihood, no iteration is required:
+        this function does nothing.
+        """
+        self.likelihood.fit_full(self.kern.K(self.X))
+        self._set_params(self._get_params()) # update the GP
+
+    def _model_fit_term(self):
+        """
+        Computes the model fit term, using YYT if it is available.
+        """
+        if self.likelihood.YYT is None:
+            tmp, _ = linalg.lapack.flapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
+            return -0.5 * np.sum(np.square(tmp))
+            #return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
+        else:
+            return -0.5 * np.sum(np.multiply(self.Ki, self.likelihood.YYT))
+
+    def log_likelihood(self):
+        """
+        The log marginal likelihood of the GP.
+
+        For an EP model, this can be written as the log likelihood of a
+        regression model for a new variable Y* = v_tilde/tau_tilde, with a
+        covariance matrix K* = K + diag(1./tau_tilde), plus a normalization
+        term.
+        """
+        return -0.5 * self.D * self.K_logdet + self._model_fit_term() + self.likelihood.Z
+
+    def _log_likelihood_gradients(self):
+        """
+        The gradient of all parameters.
+
+        Note, we use the chain rule: dL_dtheta = dL_dK * dK_dtheta
+        """
+        return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
+
+    def _raw_predict(self, _Xnew, which_parts='all', full_cov=False, stop=False):
+        """
+        Internal helper function for making predictions; does not account
+        for normalization or likelihood.
+        """
+        Kx = self.kern.K(_Xnew, self.X, which_parts=which_parts).T
+        #KiKx = np.dot(self.Ki, Kx)
+        KiKx, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
+        mu = np.dot(KiKx.T, self.likelihood.Y)
+        if full_cov:
+            Kxx = self.kern.K(_Xnew, which_parts=which_parts)
+            var = Kxx - np.dot(KiKx.T, Kx)
+        else:
+            Kxx = self.kern.Kdiag(_Xnew, which_parts=which_parts)
+            var = Kxx - np.sum(np.multiply(KiKx, Kx), 0)
+            var = var[:, None]
+        if stop:
+            import pdb; pdb.set_trace() # drop into the debugger on request
+        return mu, var
+
+    def predict(self, Xnew, which_parts='all', full_cov=False):
+        """
+        Predict the function(s) at the new point(s) Xnew.
+
+        Arguments
+        ---------
+        :param Xnew: The points at which to make a prediction
+        :type Xnew: np.ndarray, Nnew x self.Q
+        :param which_parts: specifies which kernel parts to use in prediction
+        :type which_parts: ('all', list of bools)
+        :param full_cov: whether to return the full covariance matrix, or just the diagonal
+        :type full_cov: bool
+        :rtype: posterior mean, a Numpy array, Nnew x self.D
+        :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
+        :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
+
+        If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
+        This is to allow for different normalizations of the output dimensions.
+        """
+        # normalize X values
+        Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
+        mu, var = self._raw_predict(Xnew, full_cov=full_cov, which_parts=which_parts)
+
+        # now push through likelihood
+        mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
+
+        return mean, var, _025pm, _975pm
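Note: for reference, the algebra that `_set_params`, `_model_fit_term` and `log_likelihood` implement via LAPACK's `dpotrs`/`dtrtrs` can be written out with plain SciPy. This is an illustrative sketch only — it forms `K^{-1}` explicitly, which the code above deliberately avoids, and it folds the `2*pi` constant in directly, whereas GPy keeps that term in `likelihood.Z`:

```python
import numpy as np
from scipy.linalg import cholesky, cho_solve

def gaussian_gp_terms(K, Y):
    # K: (N, N) covariance including noise; Y: (N, D) targets, columns iid.
    N, D = Y.shape
    L = cholesky(K, lower=True)
    alpha = cho_solve((L, True), Y)              # K^{-1} Y without inverting K
    K_logdet = 2.0 * np.sum(np.log(np.diag(L)))  # log|K| from the Cholesky factor
    fit = -0.5 * np.sum(alpha * Y)               # -0.5 tr(Y^T K^{-1} Y): the model fit term
    Ki = cho_solve((L, True), np.eye(N))         # explicit K^{-1} (sketch only)
    dL_dK = 0.5 * (alpha.dot(alpha.T) - D * Ki)  # the gradient cached by _set_params
    log_marginal = -0.5 * D * K_logdet + fit - 0.5 * N * D * np.log(2 * np.pi)
    return log_marginal, dL_dK
```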
diff --git a/GPy/core/__init__.py b/GPy/core/__init__.py
index bb97b04e..e49541b0 100644
--- a/GPy/core/__init__.py
+++ b/GPy/core/__init__.py
@@ -1,7 +1,8 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
-
+from GP import GP
+from sparse_GP import sparse_GP
 from model import *
 from parameterised import *
 import priors
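Note: with these re-exports in place, downstream modules (and user code) import the base classes from `GPy.core` instead of reaching into `GPy.models`; a sketch of the import they use:

```python
# Base classes now live in, and are re-exported from, GPy.core:
from GPy.core import GP, sparse_GP
```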
diff --git a/GPy/core/gp_base.py b/GPy/core/gp_base.py
new file mode 100644
index 00000000..8df1a5c1
--- /dev/null
+++ b/GPy/core/gp_base.py
@@ -0,0 +1,132 @@
+import numpy as np
+from model import model
+from .. import kern
+from ..util.plot import gpplot, Tango, x_frame1D, x_frame2D
+import pylab as pb
+
+class GPBase(model):
+    """
+    Gaussian Process model holding the behaviour shared between the
+    sparse_GP and GP models.
+    """
+
+    def __init__(self, X, likelihood, kernel, normalize_X=False):
+        self.X = X
+        assert len(self.X.shape) == 2
+        self.N, self.Q = self.X.shape
+        assert isinstance(kernel, kern.kern)
+        self.kern = kernel
+        self.likelihood = likelihood
+        assert self.X.shape[0] == self.likelihood.data.shape[0]
+        self.N, self.D = self.likelihood.data.shape
+
+        if normalize_X:
+            self._Xmean = X.mean(0)[None, :]
+            self._Xstd = X.std(0)[None, :]
+            self.X = (X.copy() - self._Xmean) / self._Xstd
+        else:
+            self._Xmean = np.zeros((1, self.X.shape[1]))
+            self._Xstd = np.ones((1, self.X.shape[1]))
+
+        super(GPBase, self).__init__()
+
+        # All leaf nodes should call self._set_params(self._get_params()) at
+        # the end
+
+    def _get_params(self):
+        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
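Note: `_get_params` and `_set_params` share an implicit contract: the flat vector is always kernel parameters first, then likelihood parameters, split at `kern.Nparam_transformed()`. A small sketch of that contract, with hypothetical parameter counts:

```python
import numpy as np

# Hypothetical shapes for illustration: a kernel exposing 3 transformed
# parameters and a likelihood exposing 1 (e.g. Gaussian noise variance).
kern_params = np.array([1.0, 0.5, 0.1])
lik_params = np.array([0.01])

p = np.hstack((kern_params, lik_params))  # what _get_params returns

# _set_params must slice in the same order, using the kernel's count
# (kern.Nparam_transformed() in GPy):
n_kern = kern_params.size
assert np.allclose(p[:n_kern], kern_params)
assert np.allclose(p[n_kern:], lik_params)
```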
+
+    def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False):
+        """
+        Plot the GP's view of the world, where the data is normalized and the
+        likelihood is Gaussian.
+
+        :param samples: the number of a posteriori samples to plot
+        :param which_data: which of the training data to plot (default all)
+        :type which_data: 'all' or a slice object to slice self.X, self.Y
+        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
+        :param which_parts: which of the kernel functions to plot (additively)
+        :type which_parts: 'all', or list of bools
+        :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
+
+        Plot the posterior of the GP.
+          - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
+          - In two dimensions, a contour plot shows the mean predicted function.
+          - In higher dimensions, plotting is not yet implemented. TODO
+
+        Only part of the data and part of the posterior functions can be
+        plotted, using which_data and which_parts.
+        """
+        if which_data == 'all':
+            which_data = slice(None)
+
+        if self.X.shape[1] == 1:
+            Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
+            if samples == 0:
+                m, v = self._raw_predict(Xnew, which_parts=which_parts)
+                gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v))
+                pb.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5)
+            else:
+                m, v = self._raw_predict(Xnew, which_parts=which_parts, full_cov=True)
+                Ysim = np.random.multivariate_normal(m.flatten(), v, samples)
+                gpplot(Xnew, m, m - 2 * np.sqrt(np.diag(v)[:, None]), m + 2 * np.sqrt(np.diag(v)[:, None]))
+                for i in range(samples):
+                    pb.plot(Xnew, Ysim[i, :], Tango.colorsHex['darkBlue'], linewidth=0.25)
+                pb.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5)
+            pb.xlim(xmin, xmax)
+            ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None])))
+            ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
+            pb.ylim(ymin, ymax)
+            if hasattr(self, 'Z'):
+                pb.plot(self.Z, self.Z * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
+
+        elif self.X.shape[1] == 2:
+            resolution = resolution or 50
+            Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution)
+            m, v = self._raw_predict(Xnew, which_parts=which_parts)
+            m = m.reshape(resolution, resolution).T
+            pb.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
+            pb.scatter(self.X[:, 0], self.X[:, 1], 40, self.likelihood.Y, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max())
+            pb.xlim(xmin[0], xmax[0])
+            pb.ylim(xmin[1], xmax[1])
+        else:
+            raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
+
+    def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20):
+        """
+        Plot the posterior of the model, projected through the likelihood.
+
+        :param levels: for 2D plotting, the number of contour levels to use
+        """
+        # TODO include samples
+        if which_data == 'all':
+            which_data = slice(None)
+
+        if self.X.shape[1] == 1:
+            Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now
+            Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits)
+            m, var, lower, upper = self.predict(Xnew, which_parts=which_parts)
+            for d in range(m.shape[1]):
+                gpplot(Xnew, m[:, d], lower[:, d], upper[:, d])
+                pb.plot(Xu[which_data], self.likelihood.data[which_data, d], 'kx', mew=1.5)
+            ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper))
+            ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
+            pb.xlim(xmin, xmax)
+            pb.ylim(ymin, ymax)
+
+        elif self.X.shape[1] == 2: # FIXME
+            resolution = resolution or 50
+            Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits, resolution)
+            x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution)
+            m, var, lower, upper = self.predict(Xnew, which_parts=which_parts)
+            m = m.reshape(resolution, resolution).T
+            pb.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
+            Yf = self.likelihood.Y.flatten()
+            pb.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.)
+            pb.xlim(xmin[0], xmax[0])
+            pb.ylim(xmin[1], xmax[1])
+
+        else:
+            raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
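Note: a minimal usage sketch of the plotting entry points, assuming the public API of this era (`GPy.models.GP_regression`, `GPy.kern.rbf`/`white`) and a Python 2 environment:

```python
import numpy as np
import GPy

# Toy 1D regression problem.
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05

kernel = GPy.kern.rbf(1) + GPy.kern.white(1)
m = GPy.models.GP_regression(X, Y, kernel, normalize_X=True)
m.optimize()
m.plot()    # inherited from GPBase via GP: data-space posterior
m.plot_f()  # the GP's view of the normalized data
```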
diff --git a/GPy/core/model.py b/GPy/core/model.py
index 5dc6b254..dea55319 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -22,8 +22,8 @@ class model(parameterised):
         self.priors = [None for i in range(self._get_params().size)]
         self.optimization_runs = []
         self.sampling_runs = []
-        self._set_params(self._get_params())
         self.preferred_optimizer = 'tnc'
+        #self._set_params(self._get_params()) has been taken out as it should only be called on leaf nodes
     def _get_params(self):
         raise NotImplementedError, "this needs to be implemented to use the model class"
     def _set_params(self, x):
diff --git a/GPy/models/sparse_GP.py b/GPy/core/sparse_GP.py
similarity index 91%
rename from GPy/models/sparse_GP.py
rename to GPy/core/sparse_GP.py
index fad18dc6..89632aa1 100644
--- a/GPy/models/sparse_GP.py
+++ b/GPy/core/sparse_GP.py
@@ -4,13 +4,12 @@
 import numpy as np
 import pylab as pb
 from ..util.linalg import mdot, jitchol, tdot, symmetrify, backsub_both_sides, chol_inv
-from ..util.plot import gpplot
-from .. import kern
 from GP import GP
 from scipy import linalg
 from ..likelihoods import Gaussian
+from gp_base import GPBase
 
-class sparse_GP(GP):
+class sparse_GP(GPBase):
     """
     Variational sparse GP model
 
@@ -31,6 +30,8 @@ class sparse_GP(GP):
     """
     def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
+        super(sparse_GP, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+
         self.Z = Z
         self.M = Z.shape[0]
         self.likelihood = likelihood
@@ -42,13 +43,13 @@ class sparse_GP(GP):
             self.has_uncertain_inputs = True
             self.X_variance = X_variance
 
-        GP.__init__(self, X, likelihood, kernel=kernel, normalize_X=normalize_X)
+        if normalize_X:
+            self.Z = (self.Z.copy() - self._Xmean) / self._Xstd
 
         # normalize X uncertainty also
         if self.has_uncertain_inputs:
             self.X_variance /= np.square(self._Xstd)
 
-
     def _compute_kernel_matrices(self):
         # kernel computations, using BGPLVM notation
         self.Kmm = self.kern.K(self.Z)
@@ -139,8 +140,6 @@ class sparse_GP(GP):
         self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision)
         self.partial_for_likelihood += self.likelihood.precision * (0.5 * np.sum(self.A * self.DBi_plus_BiPBi) - np.sum(np.square(self._LBi_Lmi_psi1V)))
 
-
-
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         if self.likelihood.is_heteroscedastic:
@@ -282,3 +281,19 @@ class sparse_GP(GP):
 
         return mean, var, _025pm, _975pm
 
+    def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20):
+        super(sparse_GP, self).plot(samples=samples, plot_limits=plot_limits, which_data=which_data, which_parts=which_parts, resolution=resolution, levels=levels)
+        if which_data == 'all':
+            which_data = slice(None)
+        if self.X.shape[1] == 1:
+            Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now
+            if self.has_uncertain_inputs:
+                pb.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0],
+                            xerr=2 * np.sqrt(self.X_variance[which_data, 0]),
+                            ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
+            Zu = self.Z * self._Xstd + self._Xmean
+            pb.plot(Zu, Zu * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
+            # pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten()))
+
+        elif self.X.shape[1] == 2: # FIXME
+            pb.plot(self.Z[:, 0], self.Z[:, 1], 'wo')
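Note: the comment left in `model.__init__` encodes a construction convention: only fully-constructed leaf classes may trigger the first `_set_params` recompute, because `_set_params` typically touches attributes (kernel, likelihood, inducing inputs) that intermediate classes have not created yet. A self-contained sketch of the pattern, with hypothetical `Base`/`Leaf` classes standing in for `model` and a concrete GP:

```python
import numpy as np

class Base(object):
    def __init__(self):
        # No _set_params here: at this point a subclass may not have
        # created all the attributes that _set_params needs.
        self.ready = True

class Leaf(Base):
    def __init__(self, theta):
        super(Leaf, self).__init__()
        self.theta = np.asarray(theta, dtype=float)
        # Leaf nodes trigger the first recompute once fully constructed:
        self._set_params(self._get_params())

    def _get_params(self):
        return self.theta.copy()

    def _set_params(self, p):
        self.theta = p
        self.cached = np.sum(p ** 2)  # stands in for K, its Cholesky, dL_dK, ...

m = Leaf([1.0, 2.0])
assert m.cached == 5.0
```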
diff --git a/GPy/models/Bayesian_GPLVM.py b/GPy/models/Bayesian_GPLVM.py
index 5f22526b..52779e8e 100644
--- a/GPy/models/Bayesian_GPLVM.py
+++ b/GPy/models/Bayesian_GPLVM.py
@@ -5,7 +5,7 @@ import numpy as np
 import pylab as pb
 import sys, pdb
 from GPLVM import GPLVM
-from sparse_GP import sparse_GP
+from ..core import sparse_GP
 from GPy.util.linalg import pdinv
 from ..likelihoods import Gaussian
 from .. import kern
@@ -65,6 +65,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         self._savedABCD = []
 
         sparse_GP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs)
+        self._set_params(self._get_params())
 
     @property
     def oldps(self):
@@ -96,7 +97,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
 
     def _clipped(self, x):
         return x # np.clip(x, -1e300, 1e300)
-    
+
     def _set_params(self, x, save_old=True, save_count=0):
         # try:
         x = self._clipped(x)
diff --git a/GPy/models/FITC.py b/GPy/models/FITC.py
index 0f948d32..da2c4d84 100644
--- a/GPy/models/FITC.py
+++ b/GPy/models/FITC.py
@@ -7,7 +7,7 @@ from ..util.linalg import mdot, jitchol, chol_inv, tdot, symmetrify,pdinv
 from ..util.plot import gpplot
 from .. import kern
 from scipy import stats, linalg
-from sparse_GP import sparse_GP
+from ..core import sparse_GP
 
 def backsub_both_sides(L,X):
     """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
@@ -16,6 +16,9 @@ def backsub_both_sides(L,X):
 
 class FITC(sparse_GP):
 
+    def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
+        super(FITC, self).__init__(X, likelihood, kernel, Z, X_variance=X_variance, normalize_X=normalize_X)
+
     def update_likelihood_approximation(self):
         """
         Approximates a non-gaussian likelihood using Expectation Propagation
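Note: `Bayesian_GPLVM(sparse_GP, GPLVM)` relies on the same leaf-node convention along a cooperative `super` chain: initializers run base-first, so only the most-derived class knows when construction has finished and the first `_set_params` is safe. A toy sketch with hypothetical classes `A`/`B`/`C`:

```python
order = []

class A(object):
    def __init__(self):
        order.append('A')

class B(A):
    def __init__(self):
        super(B, self).__init__()
        order.append('B')
        # B cannot safely run _set_params here: a subclass below it may
        # not have created all of its attributes yet.

class C(B):
    def __init__(self):
        super(C, self).__init__()
        order.append('C')
        # Only the leaf knows construction is finished, so only the leaf
        # runs the equivalent of self._set_params(self._get_params()).

C()
assert order == ['A', 'B', 'C']
```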
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
deleted file mode 100644
index e6c4c1d6..00000000
--- a/GPy/models/GP.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-
-import numpy as np
-from scipy import linalg
-import pylab as pb
-from .. import kern
-from ..core import model
-from ..util.linalg import pdinv, mdot, tdot
-from ..util.plot import gpplot, x_frame1D, x_frame2D, Tango
-from ..likelihoods import EP
-
-class GP(model):
-    """
-    Gaussian Process model for regression and EP
-
-    :param X: input observations
-    :param kernel: a GPy kernel, defaults to rbf+white
-    :parm likelihood: a GPy likelihood
-    :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
-    :type normalize_X: False|True
-    :rtype: model object
-    :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1
-    :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
-    :type powerep: list
-
-    .. Note:: Multiple independent outputs are allowed using columns of Y
-
-    """
-    def __init__(self, X, likelihood, kernel, normalize_X=False):
-
-        # parse arguments
-        self.X = X
-        assert len(self.X.shape) == 2
-        self.N, self.Q = self.X.shape
-        assert isinstance(kernel, kern.kern)
-        self.kern = kernel
-        self.likelihood = likelihood
-        assert self.X.shape[0] == self.likelihood.data.shape[0]
-        self.N, self.D = self.likelihood.data.shape
-
-        # here's some simple normalization for the inputs
-        if normalize_X:
-            self._Xmean = X.mean(0)[None, :]
-            self._Xstd = X.std(0)[None, :]
-            self.X = (X.copy() - self._Xmean) / self._Xstd
-            if hasattr(self, 'Z'):
-                self.Z = (self.Z - self._Xmean) / self._Xstd
-        else:
-            self._Xmean = np.zeros((1, self.X.shape[1]))
-            self._Xstd = np.ones((1, self.X.shape[1]))
-
-        if not hasattr(self,'has_uncertain_inputs'):
-            self.has_uncertain_inputs = False
-
-        model.__init__(self)
-
-    def dL_dZ(self):
-        """
-        TODO: one day we might like to learn Z by gradient methods?
-        """
-        #FIXME: this doesn;t live here.
-        return np.zeros_like(self.Z)
-
-    def _set_params(self, p):
-        self.kern._set_params_transformed(p[:self.kern.Nparam_transformed()])
-        self.likelihood._set_params(p[self.kern.Nparam_transformed():])
-
-        self.K = self.kern.K(self.X)
-        self.K += self.likelihood.covariance_matrix
-
-        self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
-
-        # the gradient of the likelihood wrt the covariance matrix
-        if self.likelihood.YYT is None:
-            #alpha = np.dot(self.Ki, self.likelihood.Y)
-            alpha,_ = linalg.lapack.flapack.dpotrs(self.L, self.likelihood.Y,lower=1)
-            self.dL_dK = 0.5 * (tdot(alpha) - self.D * self.Ki)
-        else:
-            #tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
-            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
-            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
-            self.dL_dK = 0.5 * (tmp - self.D * self.Ki)
-
-    def _get_params(self):
-        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
-
-    def _get_param_names(self):
-        return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
-
-    def update_likelihood_approximation(self):
-        """
-        Approximates a non-gaussian likelihood using Expectation Propagation
-
-        For a Gaussian likelihood, no iteration is required:
-        this function does nothing
-        """
-        self.likelihood.fit_full(self.kern.K(self.X))
-        self._set_params(self._get_params()) # update the GP
-
-    def _model_fit_term(self):
-        """
-        Computes the model fit using YYT if it's available
-        """
-        if self.likelihood.YYT is None:
-            tmp, _ = linalg.lapack.flapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
-            return -0.5 * np.sum(np.square(tmp))
-            #return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
-        else:
-            return -0.5 * np.sum(np.multiply(self.Ki, self.likelihood.YYT))
-
-    def log_likelihood(self):
-        """
-        The log marginal likelihood of the GP.
-
-        For an EP model, can be written as the log likelihood of a regression
-        model for a new variable Y* = v_tilde/tau_tilde, with a covariance
-        matrix K* = K + diag(1./tau_tilde) plus a normalization term.
-        """
-        return -0.5 * self.D * self.K_logdet + self._model_fit_term() + self.likelihood.Z
-
-    def _log_likelihood_gradients(self):
-        """
-        The gradient of all parameters.
-
-        Note, we use the chain rule: dL_dtheta = dL_dK * d_K_dtheta
-        """
-        return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
-
-    def _raw_predict(self, _Xnew, which_parts='all', full_cov=False,stop=False):
-        """
-        Internal helper function for making predictions, does not account
-        for normalization or likelihood
-        """
-        Kx = self.kern.K(_Xnew,self.X,which_parts=which_parts).T
-        #KiKx = np.dot(self.Ki, Kx)
-        KiKx, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
-        mu = np.dot(KiKx.T, self.likelihood.Y)
-        if full_cov:
-            Kxx = self.kern.K(_Xnew, which_parts=which_parts)
-            var = Kxx - np.dot(KiKx.T, Kx)
-        else:
-            Kxx = self.kern.Kdiag(_Xnew, which_parts=which_parts)
-            var = Kxx - np.sum(np.multiply(KiKx, Kx), 0)
-            var = var[:, None]
-        if stop:
-            debug_this
-        return mu, var
-
-    def predict(self, Xnew, which_parts='all', full_cov=False):
-        """
-        Predict the function(s) at the new point(s) Xnew.
-
-        Arguments
-        ---------
-        :param Xnew: The points at which to make a prediction
-        :type Xnew: np.ndarray, Nnew x self.Q
-        :param which_parts: specifies which outputs kernel(s) to use in prediction
-        :type which_parts: ('all', list of bools)
-        :param full_cov: whether to return the folll covariance matrix, or just the diagonal
-        :type full_cov: bool
-        :rtype: posterior mean, a Numpy array, Nnew x self.D
-        :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
-        :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
-
-
-        If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
-        This is to allow for different normalizations of the output dimensions.
-
-        """
-        # normalize X values
-        Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
-        mu, var = self._raw_predict(Xnew, full_cov=full_cov, which_parts=which_parts)
-
-        # now push through likelihood
-        mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
-
-        return mean, var, _025pm, _975pm
-
-    def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False):
-        """
-        Plot the GP's view of the world, where the data is normalized and the
-        likelihood is Gaussian.
-
-        :param samples: the number of a posteriori samples to plot
-        :param which_data: which if the training data to plot (default all)
-        :type which_data: 'all' or a slice object to slice self.X, self.Y
-        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
-        :param which_parts: which of the kernel functions to plot (additively)
-        :type which_parts: 'all', or list of bools
-        :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
-
-        Plot the posterior of the GP.
-          - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
-          - In two dimsensions, a contour-plot shows the mean predicted function
-          - In higher dimensions, we've no implemented this yet !TODO!
-
-        Can plot only part of the data and part of the posterior functions
-        using which_data and which_functions
-        """
-        if which_data == 'all':
-            which_data = slice(None)
-
-        if self.X.shape[1] == 1:
-            Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
-            if samples == 0:
-                m, v = self._raw_predict(Xnew, which_parts=which_parts)
-                gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v))
-                pb.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5)
-            else:
-                m, v = self._raw_predict(Xnew, which_parts=which_parts, full_cov=True)
-                Ysim = np.random.multivariate_normal(m.flatten(), v, samples)
-                gpplot(Xnew, m, m - 2 * np.sqrt(np.diag(v)[:, None]), m + 2 * np.sqrt(np.diag(v))[:, None])
-                for i in range(samples):
-                    pb.plot(Xnew, Ysim[i, :], Tango.colorsHex['darkBlue'], linewidth=0.25)
-                pb.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5)
-            pb.xlim(xmin, xmax)
-            ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None])))
-            ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
-            pb.ylim(ymin, ymax)
-            if hasattr(self, 'Z'):
-                pb.plot(self.Z, self.Z * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
-
-        elif self.X.shape[1] == 2:
-            resolution = resolution or 50
-            Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution)
-            m, v = self._raw_predict(Xnew, which_parts=which_parts)
-            m = m.reshape(resolution, resolution).T
-            pb.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
-            pb.scatter(Xorig[:, 0], Xorig[:, 1], 40, Yorig, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max())
-            pb.xlim(xmin[0], xmax[0])
-            pb.ylim(xmin[1], xmax[1])
-        else:
-            raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
-
-    def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20):
-        """
-        TODO: Docstrings!
-        :param levels: for 2D plotting, the number of contour levels to use
-
-        """
-        # TODO include samples
-        if which_data == 'all':
-            which_data = slice(None)
-
-        if self.X.shape[1] == 1:
-
-            Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now
-
-            Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits)
-            m, var, lower, upper = self.predict(Xnew, which_parts=which_parts)
-            for d in range(m.shape[1]):
-                gpplot(Xnew, m[:,d], lower[:,d], upper[:,d])
-                pb.plot(Xu[which_data], self.likelihood.data[which_data,d], 'kx', mew=1.5)
-            if self.has_uncertain_inputs:
-                pb.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0],
-                            xerr=2 * np.sqrt(self.X_variance[which_data, 0]),
-                            ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
-
-            ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper))
-            ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
-            pb.xlim(xmin, xmax)
-            pb.ylim(ymin, ymax)
-            if hasattr(self, 'Z'):
-                Zu = self.Z * self._Xstd + self._Xmean
-                pb.plot(Zu, Zu * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
-                # pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten()))
-
-        elif self.X.shape[1] == 2: # FIXME
-            resolution = resolution or 50
-            Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits, resolution)
-            x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution)
-            m, var, lower, upper = self.predict(Xnew, which_parts=which_parts)
-            m = m.reshape(resolution, resolution).T
-            pb.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
-            Yf = self.likelihood.Y.flatten()
-            pb.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.)
-            pb.xlim(xmin[0], xmax[0])
-            pb.ylim(xmin[1], xmax[1])
-            if hasattr(self, 'Z'):
-                pb.plot(self.Z[:, 0], self.Z[:, 1], 'wo')
-
-        else:
-            raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
diff --git a/GPy/models/GPLVM.py b/GPy/models/GPLVM.py
index 7445d0ab..5d89527b 100644
--- a/GPy/models/GPLVM.py
+++ b/GPy/models/GPLVM.py
@@ -8,7 +8,7 @@ import sys, pdb
 from .. import kern
 from ..core import model
 from ..util.linalg import pdinv, PCA
-from GP import GP
+from ..core import GP
 from ..likelihoods import Gaussian
 from .. import util
 from GPy.util import plot_latent
@@ -32,7 +32,8 @@ class GPLVM(GP):
         if kernel is None:
             kernel = kern.rbf(Q, ARD=Q>1) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
         likelihood = Gaussian(Y, normalize=normalize_Y)
-        GP.__init__(self, X, likelihood, kernel, **kwargs)
+        super(GPLVM, self).__init__(X, likelihood, kernel, **kwargs)
+        self._set_params(self._get_params())
 
     def initialise_latent(self, init, Q, Y):
         if init == 'PCA':
@@ -63,4 +64,4 @@ class GPLVM(GP):
         pb.plot(mu[:,0], mu[:,1],'k',linewidth=1.5)
 
     def plot_latent(self, *args, **kwargs):
-        util.plot_latent.plot_latent(self, *args, **kwargs)
\ No newline at end of file
+        util.plot_latent.plot_latent(self, *args, **kwargs)
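Note: `GPLVM.initialise_latent`, visible in the hunk above, supports `init == 'PCA'`. The idea can be sketched with plain NumPy; this is illustrative only, and the `pca_init` helper below is hypothetical (GPy's actual helper is `PCA` from `GPy.util.linalg`):

```python
import numpy as np

def pca_init(Y, Q):
    # Initialise an N x Q latent space from N x D observations.
    Yc = Y - Y.mean(0)                            # centre the data
    U, s, Vt = np.linalg.svd(Yc, full_matrices=False)
    X = U[:, :Q] * s[:Q]                          # scores on the first Q principal components
    return X / X.std(0)                           # unit variance per latent dimension

Y = np.random.randn(50, 5)
X0 = pca_init(Y, Q=2)
assert X0.shape == (50, 2)
```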
diff --git a/GPy/models/GP_regression.py b/GPy/models/GP_regression.py
index 7f2673a6..2979456b 100644
--- a/GPy/models/GP_regression.py
+++ b/GPy/models/GP_regression.py
@@ -3,7 +3,7 @@
 
 import numpy as np
 
-from GP import GP
+from ..core import GP
 from .. import likelihoods
 from .. import kern
@@ -31,4 +31,5 @@ class GP_regression(GP):
 
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)
-        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
+        super(GP_regression, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        self._set_params(self._get_params())
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index 7cacffb1..700ea120 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -2,9 +2,9 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from GP import GP
+#from GP import GP
+#from sparse_GP import sparse_GP
 from GP_regression import GP_regression
-from sparse_GP import sparse_GP
 from sparse_GP_regression import sparse_GP_regression
 from GPLVM import GPLVM
 from warped_GP import warpedGP
diff --git a/GPy/models/generalized_FITC.py b/GPy/models/generalized_FITC.py
index 966cbd39..514a35a1 100644
--- a/GPy/models/generalized_FITC.py
+++ b/GPy/models/generalized_FITC.py
@@ -7,7 +7,7 @@ from ..util.linalg import mdot, jitchol, chol_inv, pdinv, trace_dot
 from ..util.plot import gpplot
 from .. import kern
 from scipy import stats, linalg
-from sparse_GP import sparse_GP
+from ..core import sparse_GP
 
 def backsub_both_sides(L,X):
     """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
@@ -36,12 +36,12 @@ class generalized_FITC(sparse_GP):
     """
 
     def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
-
        self.Z = Z
        self.M = self.Z.shape[0]
        self.true_precision = likelihood.precision
-        sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False)
+        super(generalized_FITC, self).__init__(X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False)
+        self._set_params(self._get_params())
 
     def _set_params(self, p):
         self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py
index 89b5d730..88930040 100644
--- a/GPy/models/mrd.py
+++ b/GPy/models/mrd.py
@@ -5,7 +5,7 @@ Created on 10 Apr 2013
 '''
 from GPy.core import model
 from GPy.models.Bayesian_GPLVM import Bayesian_GPLVM
-from GPy.models.sparse_GP import sparse_GP
+from GPy.core import sparse_GP
 from GPy.util.linalg import PCA
 from scipy import linalg
 import numpy
@@ -23,7 +23,7 @@ class MRD(model):
     :type likelihood_list: [GPy.likelihood] | [Y1..Yy]
     :param names: names for different gplvm models
     :type names: [str]
-    :param Q: latent dimensionality (will raise 
+    :param Q: latent dimensionality (will raise
     :type Q: int
     :param initx: initialisation method for the latent space
     :type initx: 'PCA'|'random'
@@ -77,6 +77,7 @@ class MRD(model):
         self.MQ = self.M * self.Q
 
         model.__init__(self) # @UndefinedVariable
+        self._set_params(self._get_params())
 
     @property
     def X(self):
@@ -153,7 +154,7 @@ class MRD(model):
     def _get_params(self):
         """
         return parameter list containing private and shared parameters as follows:
-        
+
         =================================================================
         | mu | S | Z || theta1 | theta2 | .. | thetaN |
         =================================================================
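Note: the table in `MRD._get_params`'s docstring describes a flat concatenation of shared and per-model parameters. A sketch with hypothetical sizes (the actual ordering is as documented; the counts below are made up for illustration):

```python
import numpy as np

# Hypothetical sizes: N latent points, Q latent dims, M inducing inputs.
N, Q, M = 10, 2, 5
mu = np.zeros(N * Q)               # shared variational means
S = np.ones(N * Q)                 # shared variational variances
Z = np.zeros(M * Q)                # shared inducing inputs
thetas = [np.ones(3), np.ones(4)]  # per-model kernel/likelihood parameters

# | mu | S | Z || theta1 | theta2 | .. | thetaN |
params = np.hstack([mu, S, Z] + thetas)
assert params.size == 2 * N * Q + M * Q + 7
```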
diff --git a/GPy/models/sparse_GPLVM.py b/GPy/models/sparse_GPLVM.py
index 591c49b2..af388f1c 100644
--- a/GPy/models/sparse_GPLVM.py
+++ b/GPy/models/sparse_GPLVM.py
@@ -8,7 +8,7 @@ import sys, pdb
 # from .. import kern
 # from ..core import model
 # from ..util.linalg import pdinv, PCA
-from GPLVM import GPLVM
+from GPLVM import GPLVM
 from sparse_GP_regression import sparse_GP_regression
 
 class sparse_GPLVM(sparse_GP_regression, GPLVM):
diff --git a/GPy/models/sparse_GP_regression.py b/GPy/models/sparse_GP_regression.py
index 84a5d37c..d90ead3d 100644
--- a/GPy/models/sparse_GP_regression.py
+++ b/GPy/models/sparse_GP_regression.py
@@ -3,7 +3,7 @@
 
 import numpy as np
-from sparse_GP import sparse_GP
+from ..core import sparse_GP
 from .. import likelihoods
 from .. import kern
 from ..likelihoods import likelihood
@@ -43,4 +43,5 @@ class sparse_GP_regression(sparse_GP):
 
         #likelihood defaults to Gaussian
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)
-        sparse_GP.__init__(self, X, likelihood, kernel, Z, normalize_X=normalize_X)
+        super(sparse_GP_regression, self).__init__(X, likelihood, kernel, Z, normalize_X=normalize_X)
+        self._set_params(self._get_params())
diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py
index 64d0b541..8e67b551 100644
--- a/GPy/models/warped_GP.py
+++ b/GPy/models/warped_GP.py
@@ -9,7 +9,7 @@ from ..util.linalg import pdinv
 from ..util.plot import gpplot
 from ..util.warping_functions import *
 from GP_regression import GP_regression
-from GP import GP
+from ..core import GP
 from .. import likelihoods
 from .. import kern
 
@@ -29,7 +29,8 @@ class warpedGP(GP):
         self.predict_in_warped_space = False
 
         likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y)
-        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
+        super(warpedGP, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        self._set_params(self._get_params())
 
     def _scale_data(self, Y):
         self._Ymax = Y.max()
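Note: finally, a usage sketch of the sparse path after the move, again assuming the public API of this era and a Python 2 environment. `sparse_GP_regression` now resolves its base class through `GPy.core.sparse_GP`:

```python
import numpy as np
import GPy

# M inducing inputs Z, chosen here from the data, parameterise the
# variational sparse model built by GPy.core.sparse_GP.
X = np.random.uniform(-3., 3., (200, 1))
Y = np.sin(X) + np.random.randn(200, 1) * 0.05
Z = X[np.random.permutation(200)[:10]]  # 10 inducing inputs

kernel = GPy.kern.rbf(1) + GPy.kern.white(1)
m = GPy.models.sparse_GP_regression(X, Y, kernel, Z=Z)
m.optimize()
m.plot()  # inducing inputs drawn as red ticks by sparse_GP.plot
```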