diff --git a/GPy/core/fitc.py b/GPy/core/fitc.py index c9cf6eb2..0d294d07 100644 --- a/GPy/core/fitc.py +++ b/GPy/core/fitc.py @@ -126,7 +126,7 @@ class FITC(SparseGP): self._dpsi1_dX += self.kern.dK_dX(_dpsi1.T,self.Z,self.X[i:i+1,:]) # the partial derivative vector for the likelihood - if self.likelihood.Nparams == 0: + if self.likelihood.num_params == 0: # save computation here. self.partial_for_likelihood = None elif self.likelihood.is_heteroscedastic: diff --git a/GPy/core/gp.py b/GPy/core/gp.py index a3ef6c80..67eb7c69 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -58,7 +58,6 @@ class GP(GPBase): def _get_params(self): return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params())) - def _get_param_names(self): return self.kern._get_param_names_transformed() + self.likelihood._get_param_names() @@ -129,7 +128,7 @@ class GP(GPBase): debug_this # @UndefinedVariable return mu, var - def predict(self, Xnew, which_parts='all', full_cov=False, likelihood_args=dict()): + def predict(self, Xnew, which_parts='all', full_cov=False, **likelihood_args): """ Predict the function(s) at the new point(s) Xnew. @@ -156,67 +155,41 @@ class GP(GPBase): mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov, **likelihood_args) return mean, var, _025pm, _975pm - def predict_single_output(self, Xnew, output=0, which_parts='all', full_cov=False): + def _raw_predict_single_output(self, _Xnew, output, which_parts='all', full_cov=False,stop=False): """ - For a specific output, predict the function at the new point(s) Xnew. - - :param Xnew: The points at which to make a prediction - :type Xnew: np.ndarray, Nnew x self.input_dim - :param output: output to predict - :type output: integer in {0,..., num_outputs-1} - :param which_parts: specifies which outputs kernel(s) to use in prediction - :type which_parts: ('all', list of bools) - :param full_cov: whether to return the full covariance matrix, or just the diagonal - :type full_cov: bool - :returns: posterior mean, a Numpy array, Nnew x self.input_dim - :returns: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise - :returns: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.input_dim - - .. Note:: For multiple output models only - """ - assert hasattr(self,'multioutput'), 'This function is for multiple output models only.' - index = np.ones_like(Xnew)*output - Xnew = np.hstack((Xnew,index)) - - # normalize X values - Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale - mu, var = self._raw_predict(Xnew, full_cov=full_cov, which_parts=which_parts) - - # now push through likelihood - mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov, noise_model = output) - return mean, var, _025pm, _975pm - - def _raw_predict_single_output(self, _Xnew, output=0, which_parts='all', full_cov=False,stop=False): - """ - Internal helper function for making predictions for a specific output, - does not account for normalization or likelihood + For a specific output, calls _raw_predict() at the new point(s) _Xnew. + This function calls _add_output_index(), so _Xnew should not have an index column specifying the output.
--------- :param Xnew: The points at which to make a prediction :type Xnew: np.ndarray, Nnew x self.input_dim :param output: output to predict - :type output: integer in {0,..., num_outputs-1} + :type output: integer in {0,..., output_dim-1} :param which_parts: specifies which outputs kernel(s) to use in prediction :type which_parts: ('all', list of bools) :param full_cov: whether to return the full covariance matrix, or just the diagonal - .. Note:: For multiple output models only + .. Note:: For models with multiple non-independent outputs only. """ - assert hasattr(self,'multioutput'), 'This function is for multiple output models only.' - # creates an index column and appends it to _Xnew - index = np.ones_like(_Xnew)*output - _Xnew = np.hstack((_Xnew,index)) + _Xnew = self._add_output_index(_Xnew, output) + return self._raw_predict(_Xnew, which_parts=which_parts,full_cov=full_cov, stop=stop) - Kx = self.kern.K(_Xnew,self.X,which_parts=which_parts).T - KiKx, _ = dpotrs(self.L, np.asfortranarray(Kx), lower=1) - mu = np.dot(KiKx.T, self.likelihood.Y) - if full_cov: - Kxx = self.kern.K(_Xnew, which_parts=which_parts) - var = Kxx - np.dot(KiKx.T, Kx) - else: - Kxx = self.kern.Kdiag(_Xnew, which_parts=which_parts) - var = Kxx - np.sum(np.multiply(KiKx, Kx), 0) - var = var[:, None] - if stop: - debug_this # @UndefinedVariable - return mu, var + def predict_single_output(self, Xnew, output=0, which_parts='all', full_cov=False, likelihood_args=dict()): + """ + For a specific output, calls predict() at the new point(s) Xnew. + This function calls _add_output_index(), so Xnew should not have an index column specifying the output. + + :param Xnew: The points at which to make a prediction + :type Xnew: np.ndarray, Nnew x self.input_dim + :param output: output to predict + :type output: integer in {0,..., output_dim-1} + :param which_parts: specifies which outputs kernel(s) to use in prediction + :type which_parts: ('all', list of bools) + :param full_cov: whether to return the full covariance matrix, or just the diagonal + :type full_cov: bool + :returns: mean: posterior mean, a Numpy array, Nnew x self.input_dim + :returns: var: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise + :returns: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.input_dim + + .. Note:: For models with multiple non-independent outputs only. + """ + Xnew = self._add_output_index(Xnew, output) + return self.predict(Xnew, which_parts=which_parts, full_cov=full_cov, **likelihood_args) diff --git a/GPy/core/gp_base.py b/GPy/core/gp_base.py index bd0b877e..083f9980 100644 --- a/GPy/core/gp_base.py +++ b/GPy/core/gp_base.py @@ -3,13 +3,14 @@ from .. import kern from ..util.plot import gpplot, Tango, x_frame1D, x_frame2D import pylab as pb from GPy.core.model import Model +import warnings +from ..likelihoods import Gaussian, Gaussian_Mixed_Noise class GPBase(Model): """ Gaussian process base model for holding shared behaviour between sparse_GP and GP models. """ - def __init__(self, X, likelihood, kernel, normalize_X=False): self.X = X assert len(self.X.shape) == 2 @@ -57,7 +58,59 @@ class GPBase(Model): self.X = state.pop() Model.setstate(self, state) - def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False, fignum=None, ax=None,output=None): + def posterior_samples_f(self,X,size=10,which_parts='all',full_cov=True): + """ + Samples the posterior GP at the points X. + + :param X: The points at which to take the samples. + :type X: np.ndarray, Nnew x self.input_dim.
+ :param size: the number of a posteriori samples to draw. + :type size: int. + :param which_parts: which of the kernel functions to use (additively). + :type which_parts: 'all', or list of bools. + :param full_cov: whether to return the full covariance matrix, or just the diagonal. + :type full_cov: bool. + :returns: Ysim: set of simulations, a Numpy array (N x size). + """ + m, v = self._raw_predict(X, which_parts=which_parts, full_cov=full_cov) + v = v.reshape(m.size,-1) if len(v.shape)==3 else v + if not full_cov: + Ysim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T + else: + Ysim = np.random.multivariate_normal(m.flatten(), v, size).T + + return Ysim + + def posterior_samples(self,X,size=10,which_parts='all',full_cov=True,noise_model=None): + """ + Samples the posterior GP at the points X. + + :param X: the points at which to take the samples. + :type X: np.ndarray, Nnew x self.input_dim. + :param size: the number of a posteriori samples to draw. + :type size: int. + :param which_parts: which of the kernel functions to use (additively). + :type which_parts: 'all', or list of bools. + :param full_cov: whether to return the full covariance matrix, or just the diagonal. + :type full_cov: bool. + :param noise_model: for mixed noise likelihood, the noise model to use in the samples. + :type noise_model: integer. + :returns: Ysim: set of simulations, a Numpy array (N x size). + """ + Ysim = self.posterior_samples_f(X, size, which_parts=which_parts, full_cov=full_cov) + if isinstance(self.likelihood,Gaussian): + noise_std = np.sqrt(self.likelihood._get_params()) + Ysim += np.random.normal(0,noise_std,Ysim.shape) + elif isinstance(self.likelihood,Gaussian_Mixed_Noise): + assert noise_model is not None, "A noise model must be specified." + noise_std = np.sqrt(self.likelihood._get_params()[noise_model]) + Ysim += np.random.normal(0,noise_std,Ysim.shape) + else: + Ysim = self.likelihood.noise_model.samples(Ysim) + + return Ysim + + def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False, fignum=None, ax=None): """ Plot the GP's view of the world, where the data is normalized. - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
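(A usage sketch of the two samplers added above. The toy model here is hypothetical, but the calls follow the signatures introduced in this hunk: posterior_samples_f returns draws of the latent function, while posterior_samples pushes those draws through the likelihood.)

    import numpy as np
    import GPy

    # hypothetical toy 1D regression model
    X = np.random.uniform(-3., 3., (20, 1))
    Y = np.sin(X) + np.random.randn(20, 1) * 0.05
    m = GPy.models.GPRegression(X, Y)
    m.optimize()

    Xnew = np.linspace(-3., 3., 100)[:, None]
    mean, var, lower, upper = m.predict(Xnew)   # predictive mean, variance and 95% bounds
    F = m.posterior_samples_f(Xnew, size=5)     # 100 x 5 draws of the latent function
    Y_sim = m.posterior_samples(Xnew, size=5)   # latent draws plus Gaussian observation noise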
@@ -89,82 +142,41 @@ class GPBase(Model): fig = pb.figure(num=fignum) ax = fig.add_subplot(111) - if not hasattr(self,'multioutput'): + if self.X.shape[1] == 1: + resolution = resolution or 200 + Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits) - if self.X.shape[1] == 1: - Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits) - if samples == 0: - m, v = self._raw_predict(Xnew, which_parts=which_parts) - gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v), axes=ax) - ax.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5) - else: - m, v = self._raw_predict(Xnew, which_parts=which_parts, full_cov=True) - v = v.reshape(m.size,-1) if len(v.shape)==3 else v - Ysim = np.random.multivariate_normal(m.flatten(), v, samples) - gpplot(Xnew, m, m - 2 * np.sqrt(np.diag(v)[:, None]), m + 2 * np.sqrt(np.diag(v))[:, None, ], axes=ax) - for i in range(samples): - ax.plot(Xnew, Ysim[i, :], Tango.colorsHex['darkBlue'], linewidth=0.25) + m, v = self._raw_predict(Xnew, which_parts=which_parts) + if samples: + Ysim = self.posterior_samples_f(Xnew, samples, which_parts=which_parts, full_cov=True) + for yi in Ysim.T: + ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) + gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v), axes=ax) - ax.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5) - ax.set_xlim(xmin, xmax) - ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None]))) - ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) - ax.set_ylim(ymin, ymax) + ax.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5) + ax.set_xlim(xmin, xmax) + ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None]))) + ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) + ax.set_ylim(ymin, ymax) - if hasattr(self,'Z'): - Zu = self.Z * self._Xscale + self._Xoffset - ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) + elif self.X.shape[1] == 2: - elif self.X.shape[1] == 2: - resolution = resolution or 50 - Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution) - m, v = self._raw_predict(Xnew, which_parts=which_parts) - m = m.reshape(resolution, resolution).T - ax.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable - ax.scatter(self.X[:, 0], self.X[:, 1], 40, self.likelihood.Y, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max()) # @UndefinedVariable - ax.set_xlim(xmin[0], xmax[0]) - ax.set_ylim(xmin[1], xmax[1]) + resolution = resolution or 50 + Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution) + m, v = self._raw_predict(Xnew, which_parts=which_parts) + m = m.reshape(resolution, resolution).T + ax.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable + ax.scatter(self.X[:, 0], self.X[:, 1], 40, self.likelihood.Y, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max()) # @UndefinedVariable + ax.set_xlim(xmin[0], xmax[0]) + ax.set_ylim(xmin[1], xmax[1]) + + if samples: + warnings.warn("Samples only implemented for 1 dimensional inputs.") - else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" else: - assert len(self.likelihood.noise_model_list) > output, 'The model has only %s outputs.' 
%self.num_outputs + raise NotImplementedError, "Cannot define a frame with more than two input dimensions" - if self.X.shape[1] == 2: - Xu = self.X[self.X[:,-1]==output ,0:1] - Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) - - if samples == 0: - m, v = self._raw_predict_single_output(Xnew, output=output, which_parts=which_parts) - gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v), axes=ax) - ax.plot(Xu[which_data], self.likelihood.Y[self.likelihood.index==output][:,None], 'kx', mew=1.5) - else: - m, v = self._raw_predict_single_output(Xnew, output=output, which_parts=which_parts, full_cov=True) - v = v.reshape(m.size,-1) if len(v.shape)==3 else v - Ysim = np.random.multivariate_normal(m.flatten(), v, samples) - gpplot(Xnew, m, m - 2 * np.sqrt(np.diag(v)[:, None]), m + 2 * np.sqrt(np.diag(v))[:, None, ], axes=ax) - for i in range(samples): - ax.plot(Xnew, Ysim[i, :], Tango.colorsHex['darkBlue'], linewidth=0.25) - ax.set_xlim(xmin, xmax) - ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None]))) - ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) - ax.set_ylim(ymin, ymax) - - elif self.X.shape[1] == 3: - raise NotImplementedError, "Plots not implemented for multioutput models with 2D inputs...yet" - assert self.num_outputs >= output, 'The model has only %s outputs.' %self.num_outputs - - else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" - - if hasattr(self,'Z'): - Zu = self.Z[self.Z[:,-1]==output,:] - Zu = self.Z * self._Xscale + self._Xoffset - Zu = self.Z[self.Z[:,-1]==output ,0:1] #?? - ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) - - - def plot(self, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, samples=0, fignum=None, ax=None, output=None, fixed_inputs=[], linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue']): + def plot(self, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, samples=0, fignum=None, ax=None, fixed_inputs=[], linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue']): """ Plot the GP with noise where the likelihood is Gaussian. 
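(A sketch of driving the reworked plot() signature below; the two-input model m is hypothetical. fixed_inputs pins one input dimension while the remaining one is swept, and samples overlays posterior draws on the mean and intervals.)

    # plot the posterior over input 0 with input 1 clamped at 0.5,
    # overlaying three posterior samples on the mean and 95% interval
    m.plot(fixed_inputs=[(1, 0.5)], samples=3)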
@@ -200,7 +212,6 @@ class GPBase(Model): :param fillcol: color of fill :param levels: for 2D plotting, the number of contour levels to use; if ax is None, create a new figure """ - # TODO include samples if which_data == 'all': which_data = slice(None) @@ -208,98 +219,202 @@ class GPBase(Model): fig = pb.figure(num=fignum) ax = fig.add_subplot(111) - if not hasattr(self,'multioutput'): + plotdims = self.input_dim - len(fixed_inputs) + if plotdims == 1: + resolution = resolution or 200 - plotdims = self.input_dim - len(fixed_inputs) - if plotdims == 1: - resolution = resolution or 200 + Xu = self.X * self._Xscale + self._Xoffset #NOTE self.X are the normalized values now - Xu = self.X * self._Xscale + self._Xoffset #NOTE self.X are the normalized values now + fixed_dims = np.array([i for i,v in fixed_inputs]) + freedim = np.setdiff1d(np.arange(self.input_dim),fixed_dims) - fixed_dims = np.array([i for i,v in fixed_inputs]) - freedim = np.setdiff1d(np.arange(self.input_dim),fixed_dims) + Xnew, xmin, xmax = x_frame1D(Xu[:,freedim], plot_limits=plot_limits) + Xgrid = np.empty((Xnew.shape[0],self.input_dim)) + Xgrid[:,freedim] = Xnew + for i,v in fixed_inputs: + Xgrid[:,i] = v - Xnew, xmin, xmax = x_frame1D(Xu[:,freedim], plot_limits=plot_limits) - Xgrid = np.empty((Xnew.shape[0],self.input_dim)) - Xgrid[:,freedim] = Xnew - for i,v in fixed_inputs: - Xgrid[:,i] = v + m, v, lower, upper = self.predict(Xgrid, which_parts=which_parts) - m, _, lower, upper = self.predict(Xgrid, which_parts=which_parts) - for d in range(m.shape[1]): - gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax, edgecol=linecol, fillcol=fillcol) - ax.plot(Xu[which_data,freedim], self.likelihood.data[which_data, d], 'kx', mew=1.5) - ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper)) - ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) - ax.set_xlim(xmin, xmax) - ax.set_ylim(ymin, ymax) + if samples: #NOTE not tested with fixed_inputs + Ysim = self.posterior_samples(Xgrid, samples, which_parts=which_parts, full_cov=True) + for yi in Ysim.T: + ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) + #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.
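+ # plot the predictive mean and central 95% interval for each output dimension, then overlay the training data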
+ for d in range(m.shape[1]): + gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax, edgecol=linecol, fillcol=fillcol) + ax.plot(Xu[which_data,freedim], self.likelihood.data[which_data, d], 'kx', mew=1.5) + ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper)) + ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) + ax.set_xlim(xmin, xmax) + ax.set_ylim(ymin, ymax) + elif self.X.shape[1] == 2: - Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits,resolution=resolution) - m, _, lower, upper = self.predict(Xnew, which_parts=which_parts) - for d in range(m.shape[1]): - gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax) - ax.plot(Xu[which_data], self.likelihood.data[which_data, d], 'kx', mew=1.5) - ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper)) - ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) - ax.set_xlim(xmin, xmax) - ax.set_ylim(ymin, ymax) + resolution = resolution or 50 + Xnew, _, _, xmin, xmax = x_frame2D(self.X, plot_limits, resolution) + x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution) + m, _, lower, upper = self.predict(Xnew, which_parts=which_parts) + m = m.reshape(resolution, resolution).T + ax.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable + Yf = self.likelihood.Y.flatten() + ax.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) # @UndefinedVariable + ax.set_xlim(xmin[0], xmax[0]) + ax.set_ylim(xmin[1], xmax[1]) - elif self.X.shape[1] == 2: - resolution = resolution or 50 - Xnew, _, _, xmin, xmax = x_frame2D(self.X, plot_limits, resolution) - x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution) - m, _, lower, upper = self.predict(Xnew, which_parts=which_parts) - m = m.reshape(resolution, resolution).T - ax.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable - Yf = self.likelihood.Y.flatten() - ax.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) # @UndefinedVariable - ax.set_xlim(xmin[0], xmax[0]) - ax.set_ylim(xmin[1], xmax[1]) - - else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" + if samples: + warnings.warn("Samples only implemented for 1 dimensional inputs.") else: - assert len(self.likelihood.noise_model_list) > output, 'The model has only %s outputs.' %self.num_outputs - if self.X.shape[1] == 2: - resolution = resolution or 200 - Xu = self.X[self.X[:,-1]==output,:] #keep the output of interest - Xu = self.X * self._Xscale + self._Xoffset - Xu = self.X[self.X[:,-1]==output ,0:1] #get rid of the index column + raise NotImplementedError, "Cannot define a frame with more than two input dimensions" - Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) - m, _, lower, upper = self.predict_single_output(Xnew, which_parts=which_parts,output=output) + def plot_single_output_f(self, output=None, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False, fignum=None, ax=None): + """ + For a specific output of a multioutput model, this function works just as plot_f does on a single-output model.
- for d in range(m.shape[1]): - gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax) - ax.plot(Xu[which_data], self.likelihood.noise_model_list[output].data, 'kx', mew=1.5) - ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper)) - ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) - ax.set_xlim(xmin, xmax) - ax.set_ylim(ymin, ymax) + :param output: which output to plot (for multiple output models only) + :type output: integer (first output is 0) + :param samples: the number of a posteriori samples to plot + :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits + :param which_data: which of the training data to plot (default all) + :type which_data: 'all' or a slice object to slice self.X, self.Y + :param which_parts: which of the kernel functions to plot (additively) + :type which_parts: 'all', or list of bools + :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D + :type resolution: int + :param full_cov: whether to use the full covariance matrix, or just the diagonal + :type full_cov: bool + :param fignum: figure to plot on. + :type fignum: figure number + :param ax: axes to plot on. + :type ax: axes handle + """ + assert output is not None, "An output must be specified." + assert len(self.likelihood.noise_model_list) > output, "The model has only %s outputs." %(self.output_dim + 1) - elif self.X.shape[1] == 3: - raise NotImplementedError, "Plots not yet implemented for multioutput models with 2D inputs" - resolution = resolution or 50 - - else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" - - """ - def samples_f(self,X,samples=10, which_data='all', which_parts='all',output=None): if which_data == 'all': which_data = slice(None) - if hasattr(self,'multioutput'): - np.hstack([X,np.ones((X.shape[0],1))*output]) + if ax is None: + fig = pb.figure(num=fignum) + ax = fig.add_subplot(111) - m, v = self._raw_predict(X, which_parts=which_parts, full_cov=True) - v = v.reshape(m.size,-1) if len(v.shape)==3 else v - Ysim = np.random.multivariate_normal(m.flatten(), v, samples) - #gpplot(X, m, m - 2 * np.sqrt(np.diag(v)[:, None]), m + 2 * np.sqrt(np.diag(v))[:, None, ], axes=ax) - for i in range(samples): - ax.plot(X, Ysim[i, :], Tango.colorsHex['darkBlue'], linewidth=0.25) + if self.X.shape[1] == 2: + Xu = self.X[self.X[:,-1]==output ,0:1] + Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) + Xnew_indexed = self._add_output_index(Xnew,output) - """ + m, v = self._raw_predict(Xnew_indexed, which_parts=which_parts) + + if samples: + Ysim = self.posterior_samples_f(Xnew_indexed, samples, which_parts=which_parts, full_cov=True) + for yi in Ysim.T: + ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) + + gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v), axes=ax) + ax.plot(Xu[which_data], self.likelihood.Y[self.likelihood.index==output][:,None], 'kx', mew=1.5) + ax.set_xlim(xmin, xmax) + ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None]))) + ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) + ax.set_ylim(ymin, ymax) + + elif self.X.shape[1] == 3: + raise NotImplementedError, "Plots not implemented for multioutput models with 2D inputs...yet" + #if samples: + # warnings.warn("Samples only implemented for 1 dimensional inputs.") + + else: + raise NotImplementedError, "Cannot
define a frame with more than two input dimensions" + + + def plot_single_output(self, output=None, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, samples=0, fignum=None, ax=None, fixed_inputs=[], linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue']): + """ + For a specific output, in a multioutput model, this function works just as plot_f on single output models. + + :param output: which output to plot (for multiple output models only) + :type output: integer (first output is 0) + :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits + :type plot_limits: np.array + :param which_data: which if the training data to plot (default all) + :type which_data: 'all' or a slice object to slice self.X, self.Y + :param which_parts: which of the kernel functions to plot (additively) + :type which_parts: 'all', or list of bools + :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D + :type resolution: int + :param levels: number of levels to plot in a contour plot. + :type levels: int + :param samples: the number of a posteriori samples to plot + :type samples: int + :param fignum: figure to plot on. + :type fignum: figure number + :param ax: axes to plot on. + :type ax: axes handle + :type output: integer (first output is 0) + :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v. + :type fixed_inputs: a list of tuples + :param linecol: color of line to plot. + :type linecol: + :param fillcol: color of fill + :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure + """ + assert output is not None, "An output must be specified." + assert len(self.likelihood.noise_model_list) > output, "The model has only %s outputs." 
%(self.output_dim + 1) + if which_data == 'all': + which_data = slice(None) + + if ax is None: + fig = pb.figure(num=fignum) + ax = fig.add_subplot(111) + + if self.X.shape[1] == 2: + resolution = resolution or 200 + + Xu = self.X * self._Xscale + self._Xoffset #undo the normalization + Xu = Xu[Xu[:,-1]==output ,0:1] #keep the output of interest and drop the index column + + Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) + Xnew_indexed = self._add_output_index(Xnew,output) + + + m, v, lower, upper = self.predict(Xnew_indexed, which_parts=which_parts,noise_model=output) + + if samples: #NOTE not tested with fixed_inputs + Ysim = self.posterior_samples(Xnew_indexed, samples, which_parts=which_parts, full_cov=True,noise_model=output) + for yi in Ysim.T: + ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) + + for d in range(m.shape[1]): + gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax, edgecol=linecol, fillcol=fillcol) + ax.plot(Xu[which_data], self.likelihood.noise_model_list[output].data, 'kx', mew=1.5) + ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper)) + ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) + ax.set_xlim(xmin, xmax) + ax.set_ylim(ymin, ymax) + + elif self.X.shape[1] == 3: + raise NotImplementedError, "Plots not implemented for multioutput models with 2D inputs...yet" + #if samples: + # warnings.warn("Samples only implemented for 1 dimensional inputs.") + + else: + raise NotImplementedError, "Cannot define a frame with more than two input dimensions" + + + def _add_output_index(self,X,output): + """ + In a multioutput model, appends an index column to X to specify the output it is related to. + + :param X: Input data + :type X: np.ndarray, N x self.input_dim + :param output: output X is related to + :type output: integer in {0,..., output_dim-1} + + .. Note:: For models with multiple non-independent outputs only. + """ + + assert hasattr(self,'multioutput'), 'This function is for multiple output models only.' + + index = np.ones((X.shape[0],1))*output + return np.hstack((X,index)) diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 1754b614..8029b920 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -34,7 +34,6 @@ class SparseGP(GPBase): self.Z = Z self.num_inducing = Z.shape[0] -# self.likelihood = likelihood if X_variance is None: self.has_uncertain_inputs = False @@ -157,7 +156,7 @@ class SparseGP(GPBase): # the partial derivative vector for the likelihood - if self.likelihood.Nparams == 0: + if self.likelihood.num_params == 0: # save computation here. self.partial_for_likelihood = None elif self.likelihood.is_heteroscedastic: @@ -305,9 +304,8 @@ class SparseGP(GPBase): return mu, var[:, None] - def predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False): + def predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False, **likelihood_args): """ - Predict the function(s) at the new point(s) Xnew.
**Arguments** @@ -338,56 +336,90 @@ class SparseGP(GPBase): mu, var = self._raw_predict(Xnew, X_variance_new, full_cov=full_cov, which_parts=which_parts) # now push through likelihood - mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov) + mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov, **likelihood_args) return mean, var, _025pm, _975pm - def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, fignum=None, ax=None, output=None): + + def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False, fignum=None, ax=None): + """ + Plot the GP's view of the world, where the data is normalized. + - In one dimension, the function is plotted with a shaded region identifying two standard deviations. + - In two dimensions, a contour-plot shows the mean predicted function + - Not implemented in higher dimensions + + :param samples: the number of a posteriori samples to plot + :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits + :param which_data: which of the training data to plot (default all) + :type which_data: 'all' or a slice object to slice self.X, self.Y + :param which_parts: which of the kernel functions to plot (additively) + :type which_parts: 'all', or list of bools + :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D + :type resolution: int + :param full_cov: whether to use the full covariance matrix, or just the diagonal + :type full_cov: bool + :param fignum: figure to plot on. + :type fignum: figure number + :param ax: axes to plot on. + :type ax: axes handle + """ if ax is None: fig = pb.figure(num=fignum) ax = fig.add_subplot(111) + if fignum is None and ax is None: + fignum = fig.num if which_data is 'all': which_data = slice(None) - GPBase.plot(self, samples=0, plot_limits=plot_limits, which_data='all', which_parts='all', resolution=None, levels=20, ax=ax, output=output) + GPBase.plot_f(self, samples=samples, plot_limits=plot_limits, which_data='all', which_parts='all', resolution=resolution, full_cov=full_cov, fignum=fignum, ax=ax) - if not hasattr(self,'multioutput'): + if self.X.shape[1] == 1: + if self.has_uncertain_inputs: + Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now + ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0], + xerr=2 * np.sqrt(self.X_variance[which_data, 0]), + ecolor='k', fmt=None, elinewidth=.5, alpha=.5) + Zu = self.Z * self._Xscale + self._Xoffset + ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) - if self.X.shape[1] == 1: - if self.has_uncertain_inputs: - Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now - ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0], - xerr=2 * np.sqrt(self.X_variance[which_data, 0]), - ecolor='k', fmt=None, elinewidth=.5, alpha=.5) - Zu = self.Z * self._Xscale + self._Xoffset - ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) + elif self.X.shape[1] == 2: + Zu = self.Z * self._Xscale + self._Xoffset + ax.plot(Zu[:, 0], Zu[:, 1], 'wo') - elif self.X.shape[1] == 2: - Zu = self.Z * self._Xscale + self._Xoffset - ax.plot(Zu[:, 0], Zu[:, 1], 'wo') else: - if self.X.shape[1] == 2 and hasattr(self,'multioutput'): -
""" - Xu = self.X[self.X[:,-1]==output,:] - if self.has_uncertain_inputs: - Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now + raise NotImplementedError, "Cannot define a frame with more than two input dimensions" - Xu = self.X[self.X[:,-1]==output ,0:1] #?? + def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, fignum=None, ax=None): + if ax is None: + fig = pb.figure(num=fignum) + ax = fig.add_subplot(111) + if fignum is None and ax is None: + fignum = fig.num + if which_data is 'all': + which_data = slice(None) - ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0], - xerr=2 * np.sqrt(self.X_variance[which_data, 0]), - ecolor='k', fmt=None, elinewidth=.5, alpha=.5) + GPBase.plot(self, samples=samples, plot_limits=plot_limits, which_data='all', which_parts='all', resolution=resolution, levels=20, fignum=fignum, ax=ax) - """ - Zu = self.Z[self.Z[:,-1]==output,:] - Zu = self.Z * self._Xscale + self._Xoffset - Zu = self.Z[self.Z[:,-1]==output ,0:1] #?? - ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) - #ax.set_ylim(ax.get_ylim()[0],) + if self.X.shape[1] == 1: + if self.has_uncertain_inputs: + Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now + ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0], + xerr=2 * np.sqrt(self.X_variance[which_data, 0]), + ecolor='k', fmt=None, elinewidth=.5, alpha=.5) + Zu = self.Z * self._Xscale + self._Xoffset + ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) - else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" + elif self.X.shape[1] == 2: + Zu = self.Z * self._Xscale + self._Xoffset + ax.plot(Zu[:, 0], Zu[:, 1], 'wo') + + + else: + raise NotImplementedError, "Cannot define a frame with more than two input dimensions" def predict_single_output(self, Xnew, output=0, which_parts='all', full_cov=False): """ @@ -470,3 +502,64 @@ class SparseGP(GPBase): var = Kxx - np.sum(np.sum(psi2 * Kmmi_LmiBLmi[None, :, :], 1), 1) return mu, var[:, None] + + + def plot_single_output_f(self, output=None, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False, fignum=None, ax=None): + + if ax is None: + fig = pb.figure(num=fignum) + ax = fig.add_subplot(111) + if fignum is None and ax is None: + fignum = fig.num + if which_data is 'all': + which_data = slice(None) + + GPBase.plot_single_output_f(self, output=output, samples=samples, plot_limits=plot_limits, which_data='all', which_parts='all', resolution=resolution, full_cov=full_cov, fignum=fignum, ax=ax) + + if self.X.shape[1] == 2: + if self.has_uncertain_inputs: + Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now + ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0], + xerr=2 * np.sqrt(self.X_variance[which_data, 0]), + ecolor='k', fmt=None, elinewidth=.5, alpha=.5) + Zu = self.Z * self._Xscale + self._Xoffset + Zu = Zu[Zu[:,1]==output,0:1] + ax.plot(Zu[:,0], np.zeros_like(Zu[:,0]) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) + + elif self.X.shape[1] == 2: + Zu = self.Z * self._Xscale + self._Xoffset + Zu = Zu[Zu[:,1]==output,0:2] + ax.plot(Zu[:, 0], Zu[:, 1], 'wo') + + + else: + raise NotImplementedError, "Cannot define a frame with more than two input dimensions" + + def plot_single_output(self, output=None, samples=0, plot_limits=None, which_data='all', 
which_parts='all', resolution=None, levels=20, fignum=None, ax=None): + if ax is None: + fig = pb.figure(num=fignum) + ax = fig.add_subplot(111) + if fignum is None and ax is None: + fignum = fig.num + if which_data is 'all': + which_data = slice(None) + + GPBase.plot_single_output(self, samples=samples, plot_limits=plot_limits, which_data='all', which_parts='all', resolution=resolution, levels=20, fignum=fignum, ax=ax, output=output) + + if self.X.shape[1] == 2: + if self.has_uncertain_inputs: + Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now + ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0], + xerr=2 * np.sqrt(self.X_variance[which_data, 0]), + ecolor='k', fmt=None, elinewidth=.5, alpha=.5) + Zu = self.Z * self._Xscale + self._Xoffset + Zu = Zu[Zu[:,1]==output,0:1] + ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12) + + elif self.X.shape[1] == 3: + Zu = self.Z * self._Xscale + self._Xoffset + Zu = Zu[Zu[:,1]==output,0:2] + ax.plot(Zu[:, 0], Zu[:, 1], 'wo') + + else: + raise NotImplementedError, "Cannot define a frame with more than two input dimensions" diff --git a/GPy/core/svigp.py b/GPy/core/svigp.py index b9101160..c5ea9c6b 100644 --- a/GPy/core/svigp.py +++ b/GPy/core/svigp.py @@ -350,8 +350,8 @@ class SVIGP(GPBase): #callback if i and not i%callback_interval: - callback() - time.sleep(0.1) + callback(self) # TODO: change this back to callback() + time.sleep(0.01) if self.epochs > 10: self._adapt_steplength() @@ -367,13 +367,13 @@ class SVIGP(GPBase): assert self.vb_steplength > 0 if self.adapt_param_steplength: - # self._adaptive_param_steplength() + self._adaptive_param_steplength() # self._adaptive_param_steplength_log() - self._adaptive_param_steplength_from_vb() + # self._adaptive_param_steplength_from_vb() self._param_steplength_trace.append(self.param_steplength) def _adaptive_param_steplength(self): - decr_factor = 0.1 + decr_factor = 0.02 g_tp = self._transform_gradients(self._log_likelihood_gradients()) self.gbar_tp = (1-1/self.tau_tp)*self.gbar_tp + 1/self.tau_tp * g_tp self.hbar_tp = (1-1/self.tau_tp)*self.hbar_tp + 1/self.tau_tp * np.dot(g_tp.T, g_tp) @@ -407,7 +407,7 @@ class SVIGP(GPBase): self.tau_t = self.tau_t*(1-self.vb_steplength) + 1 def _adaptive_vb_steplength_KL(self): - decr_factor = 1 #0.1 + decr_factor = 0.1 natgrad = self.vb_grad_natgrad() g_t1 = natgrad[0] g_t2 = natgrad[1] diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index d9e59111..dba856a0 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -57,8 +57,8 @@ def coregionalization_toy(max_iters=100): m.optimize(max_iters=max_iters) fig, axes = pb.subplots(2,1) - m.plot(output=0,ax=axes[0]) - m.plot(output=1,ax=axes[1]) + m.plot_single_output(output=0,ax=axes[0]) + m.plot_single_output(output=1,ax=axes[1]) axes[0].set_title('Output 0') axes[1].set_title('Output 1') return m @@ -77,14 +77,14 @@ def coregionalization_sparse(max_iters=100): k1 = GPy.kern.rbf(1) - m = GPy.models.SparseGPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1],num_inducing=20) + m = GPy.models.SparseGPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1],num_inducing=5) m.constrain_fixed('.*rbf_var',1.)
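+ # the coregionalisation term already carries the output scales, so the shared rbf variance is fixed at 1 to keep the parameterisation identifiable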
- m.optimize(messages=1) - #m.optimize_restarts(5, robust=True, messages=1, max_iters=max_iters, optimizer='bfgs') + #m.optimize(messages=1) + m.optimize_restarts(5, robust=True, messages=1, max_iters=max_iters, optimizer='bfgs') fig, axes = pb.subplots(2,1) - m.plot(output=0,ax=axes[0]) - m.plot(output=1,ax=axes[1]) + m.plot_single_output(output=0,ax=axes[0],plot_limits=(-1,9)) + m.plot_single_output(output=1,ax=axes[1],plot_limits=(-1,9)) axes[0].set_title('Output 0') axes[1].set_title('Output 1') return m diff --git a/GPy/inference/scg.py b/GPy/inference/scg.py index f4c7c9c4..252f348e 100644 --- a/GPy/inference/scg.py +++ b/GPy/inference/scg.py @@ -62,7 +62,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True, fnow = fold gradnew = gradf(x, *optargs) # Initial gradient. if any(np.isnan(gradnew)): - raise UnexpectedInfOrNan + raise UnexpectedInfOrNan, "Gradient contribution resulted in a NaN value" current_grad = np.dot(gradnew, gradnew) gradold = gradnew.copy() d = -gradnew # Initial search direction. diff --git a/GPy/kern/constructors.py b/GPy/kern/constructors.py index a8ec1d4b..62c29744 100644 --- a/GPy/kern/constructors.py +++ b/GPy/kern/constructors.py @@ -298,43 +298,65 @@ if sympy_available: """ Radial Basis Function covariance. """ - X = [sp.var('x%i' % i) for i in range(input_dim)] - Z = [sp.var('z%i' % i) for i in range(input_dim)] + X = sp.symbols('x_:' + str(input_dim)) + Z = sp.symbols('z_:' + str(input_dim)) variance = sp.var('variance',positive=True) if ARD: - lengthscales = [sp.var('lengthscale_%i' % i, positive=True) for i in range(input_dim)] - dist_string = ' + '.join(['(x%i-z%i)**2/lengthscale_%i**2' % (i, i, i) for i in range(input_dim)]) + lengthscales = sp.symbols('lengthscale_:' + str(input_dim)) + dist_string = ' + '.join(['(x_%i-z_%i)**2/lengthscale_%i**2' % (i, i, i) for i in range(input_dim)]) dist = parse_expr(dist_string) f = variance*sp.exp(-dist/2.) else: lengthscale = sp.var('lengthscale',positive=True) - dist_string = ' + '.join(['(x%i-z%i)**2' % (i, i) for i in range(input_dim)]) + dist_string = ' + '.join(['(x_%i-z_%i)**2' % (i, i) for i in range(input_dim)]) dist = parse_expr(dist_string) f = variance*sp.exp(-dist/(2*lengthscale**2)) return kern(input_dim, [spkern(input_dim, f, name='rbf_sympy')]) + def eq_sympy(input_dim, output_dim, ARD=False, variance=1., lengthscale=1.): + """ + Exponentiated quadratic with multiple outputs. + """ + real_input_dim = input_dim + if output_dim>1: + real_input_dim -= 1 + X = sp.symbols('x_:' + str(real_input_dim)) + Z = sp.symbols('z_:' + str(real_input_dim)) + variance = sp.var('variance',positive=True) + if ARD: + lengthscales = [sp.var('lengthscale%i_i lengthscale%i_j' % (i, i), positive=True) for i in range(real_input_dim)] + dist_string = ' + '.join(['(x_%i-z_%i)**2/(lengthscale%i_i*lengthscale%i_j)' % (i, i, i, i) for i in range(real_input_dim)]) + dist = parse_expr(dist_string) + f = variance*sp.exp(-dist/2.) + else: + lengthscale = sp.var('lengthscale_i lengthscale_j',positive=True) + dist_string = ' + '.join(['(x_%i-z_%i)**2' % (i, i) for i in range(real_input_dim)]) + dist = parse_expr(dist_string) + f = variance*sp.exp(-dist/(2*lengthscale_i*lengthscale_j)) + return kern(input_dim, [spkern(input_dim, f, output_dim=output_dim, name='eq_sympy')]) + def sinc(input_dim, ARD=False, variance=1., lengthscale=1.): + """ + TODO: Not clear why this isn't working, suggests argument of sinc is not a number.
sinc covariance function """ - X = [sp.var('x%i' % i) for i in range(input_dim)] - Z = [sp.var('z%i' % i) for i in range(input_dim)] + X = sp.symbols('x_:' + str(input_dim)) + Z = sp.symbols('z_:' + str(input_dim)) variance = sp.var('variance',positive=True) if ARD: lengthscales = [sp.var('lengthscale_%i' % i, positive=True) for i in range(input_dim)] - dist_string = ' + '.join(['(x%i-z%i)**2/lengthscale_%i**2' % (i, i, i) for i in range(input_dim)]) + dist_string = ' + '.join(['(x_%i-z_%i)**2/lengthscale_%i**2' % (i, i, i) for i in range(input_dim)]) dist = parse_expr(dist_string) f = variance*sinc(sp.pi*sp.sqrt(dist)) else: lengthscale = sp.var('lengthscale',positive=True) - dist_string = ' + '.join(['(x%i-z%i)**2' % (i, i) for i in range(input_dim)]) + dist_string = ' + '.join(['(x_%i-z_%i)**2' % (i, i) for i in range(input_dim)]) dist = parse_expr(dist_string) f = variance*sinc(sp.pi*sp.sqrt(dist)/lengthscale) return kern(input_dim, [spkern(input_dim, f, name='sinc')]) - def sympykern(input_dim, k,name=None): + def sympykern(input_dim, k=None, output_dim=1, name=None, param=None): """ A base kernel object, where all the hard work is done by sympy. @@ -349,7 +371,7 @@ if sympy_available: - to handle multiple inputs, call them x1, z1, etc - to handle multiple correlated outputs, you'll need to define each covariance function and 'cross' variance function. TODO """ - return kern(input_dim, [spkern(input_dim, k,name)]) + return kern(input_dim, [spkern(input_dim, k=k, output_dim=output_dim, name=name, param=param)]) del sympy_available def periodic_exponential(input_dim=1, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py index 303f673b..443ec9b1 100644 --- a/GPy/kern/kern.py +++ b/GPy/kern/kern.py @@ -32,7 +32,7 @@ class kern(Parameterized): """ self.parts = parts - self.Nparts = len(parts) + self.num_parts = len(parts) self.num_params = sum([p.num_params for p in self.parts]) self.input_dim = input_dim @@ -62,7 +62,7 @@ class kern(Parameterized): here just all the indices, rest can get recomputed """ return Parameterized.getstate(self) + [self.parts, - self.Nparts, + self.num_parts, self.num_params, self.input_dim, self.input_slices, @@ -74,7 +74,7 @@ class kern(Parameterized): self.input_slices = state.pop() self.input_dim = state.pop() self.num_params = state.pop() - self.Nparts = state.pop() + self.num_parts = state.pop() self.parts = state.pop() Parameterized.setstate(self, state) @@ -309,7 +309,7 @@ class kern(Parameterized): def K(self, X, X2=None, which_parts='all'): if which_parts == 'all': - which_parts = [True] * self.Nparts + which_parts = [True] * self.num_parts assert X.shape[1] == self.input_dim if X2 is None: target = np.zeros((X.shape[0], X.shape[0])) @@ -360,7 +360,7 @@ class kern(Parameterized): def Kdiag(self, X, which_parts='all'): """Compute the diagonal of the covariance function for inputs X.""" if which_parts == 'all': - which_parts = [True] * self.Nparts + which_parts = [True] * self.num_parts assert X.shape[1] == self.input_dim target = np.zeros(X.shape[0]) [p.Kdiag(X[:, i_s], target=target) for p, i_s, part_on in zip(self.parts, self.input_slices, which_parts) if part_on] @@ -497,7 +497,7 @@ class kern(Parameterized): def plot(self, x=None, plot_limits=None, which_parts='all', resolution=None, *args, **kwargs): if which_parts == 'all': - which_parts = [True] * self.Nparts + which_parts = [True] * self.num_parts if self.input_dim == 1: if x is None: x = np.zeros((1, 1)) @@
-658,7 +658,7 @@ class Kern_check_dKdiag_dX(Kern_check_model): def _set_params(self, x): self.X=x.reshape(self.X.shape) -def kern_test(kern, X=None, X2=None, verbose=False): +def kern_test(kern, X=None, X2=None, output_ind=None, verbose=False): """This function runs on kernels to check the correctness of their implementation. It checks that the covariance function is positive definite for a randomly generated data set. :param kern: the kernel to be tested. @@ -672,8 +672,13 @@ def kern_test(kern, X=None, X2=None, verbose=False): pass_checks = True if X==None: X = np.random.randn(10, kern.input_dim) + if output_ind is not None: + X[:, output_ind] = np.random.randint(kern.output_dim, size=X.shape[0]) if X2==None: X2 = np.random.randn(20, kern.input_dim) + if output_ind is not None: + X2[:, output_ind] = np.random.randint(kern.output_dim, size=X2.shape[0]) + if verbose: print("Checking covariance function is positive definite.") result = Kern_check_model(kern, X=X).is_positive_definite() diff --git a/GPy/kern/parts/kernpart.py b/GPy/kern/parts/kernpart.py index 3edf39df..503fb5bd 100644 --- a/GPy/kern/parts/kernpart.py +++ b/GPy/kern/parts/kernpart.py @@ -5,15 +5,18 @@ class Kernpart(object): def __init__(self,input_dim): """ - The base class for a kernpart: a positive definite function which forms part of a kernel + The base class for a kernpart: a positive definite function which forms part of a covariance function (kernel). :param input_dim: the number of input dimensions to the function :type input_dim: int Do not instantiate. """ + # the input dimensionality for the covariance self.input_dim = input_dim + # the number of optimisable parameters self.num_params = 1 + # the name of the covariance function. self.name = 'unnamed' def _get_params(self): diff --git a/GPy/kern/parts/periodic_Matern32.py b/GPy/kern/parts/periodic_Matern32.py index 5693085d..0de57f82 100644 --- a/GPy/kern/parts/periodic_Matern32.py +++ b/GPy/kern/parts/periodic_Matern32.py @@ -113,7 +113,7 @@ class PeriodicMatern32(Kernpart): @silence_errors def dK_dtheta(self,dL_dK,X,X2,target): - """derivative of the covariance matrix with respect to the parameters (shape is Nxnum_inducingxNparam)""" + """derivative of the covariance matrix with respect to the parameters (shape is num_data x num_inducing x num_params)""" if X2 is None: X2 = X FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X) FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2) diff --git a/GPy/kern/parts/periodic_Matern52.py b/GPy/kern/parts/periodic_Matern52.py index 7b5ae846..882084fd 100644 --- a/GPy/kern/parts/periodic_Matern52.py +++ b/GPy/kern/parts/periodic_Matern52.py @@ -115,7 +115,7 @@ class PeriodicMatern52(Kernpart): @silence_errors def dK_dtheta(self,dL_dK,X,X2,target): - """derivative of the covariance matrix with respect to the parameters (shape is Nxnum_inducingxNparam)""" + """derivative of the covariance matrix with respect to the parameters (shape is num_data x num_inducing x num_params)""" if X2 is None: X2 = X FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X) FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2) diff --git a/GPy/kern/parts/periodic_exponential.py b/GPy/kern/parts/periodic_exponential.py index 36b7b9ac..201def6d 100644 --- a/GPy/kern/parts/periodic_exponential.py +++ b/GPy/kern/parts/periodic_exponential.py @@ -111,7 +111,7 @@ class PeriodicExponential(Kernpart):
@silence_errors def dK_dtheta(self,dL_dK,X,X2,target): - """derivative of the covariance matrix with respect to the parameters (shape is Nxnum_inducingxNparam)""" + """derivative of the covariance matrix with respect to the parameters (shape is num_data x num_inducing x num_params)""" if X2 is None: X2 = X FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X) FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2) diff --git a/GPy/kern/parts/sympy_helpers.cpp b/GPy/kern/parts/sympy_helpers.cpp index 76dba4eb..e4df4d80 100644 --- a/GPy/kern/parts/sympy_helpers.cpp +++ b/GPy/kern/parts/sympy_helpers.cpp @@ -1,4 +1,7 @@ #include <math.h> +#include <float.h> +#include <limits.h> + double DiracDelta(double x){ // TODO: this doesn't seem to be a dirac delta ... should return infinity. Neil if((x<0.000001) & (x>-0.000001))//go on, laugh at my c++ skills @@ -23,3 +26,36 @@ double sinc_grad(double x){ else return (x*cos(x) - sin(x))/(x*x); } + +double erfcx(double x){ + /* scaled complementary error function, exp(x*x)*erfc(x), with guards against overflow and underflow */ + double xneg=-sqrt(log(DBL_MAX/2)); + double xmax = 1/(sqrt(M_PI)*DBL_MIN); + xmax = DBL_MAX<xmax ? DBL_MAX : xmax; + double y; + if (x<xneg) + y = INFINITY; /* exp(x*x) overflows */ + else + y = exp(x*x)*erfc(x); + if (x>xmax) + return 0.0; + else + return y; +} + +double ln_diff_erf(double x0, double x1){ + if (x0==x1) + return INFINITY; + else if(x0<0 && x1>0 || x0>0 && x1<0) + return log(erf(x0)-erf(x1)); + else if(x1>0) + return log(erfcx(x1)-erfcx(x0)*exp(x1*x1 - x0*x0))-x1*x1; + else + return log(erfcx(-x0)-erfcx(-x1)*exp(x0*x0 - x1*x1))-x0*x0; +} diff --git a/GPy/kern/parts/sympy_helpers.h b/GPy/kern/parts/sympy_helpers.h index d5b495ca..56220167 100644 --- a/GPy/kern/parts/sympy_helpers.h +++ b/GPy/kern/parts/sympy_helpers.h @@ -4,3 +4,6 @@ double DiracDelta(double x, int foo); double sinc(double x); double sinc_grad(double x); + +double erfcx(double x); +double ln_diff_erf(double x0, double x1); diff --git a/GPy/kern/parts/sympykern.py b/GPy/kern/parts/sympykern.py index 9755e37b..09ab9934 100644 --- a/GPy/kern/parts/sympykern.py +++ b/GPy/kern/parts/sympykern.py @@ -9,6 +9,7 @@ import sys current_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) import tempfile import pdb +import ast from kernpart import Kernpart class spkern(Kernpart): @@ -16,64 +17,334 @@ class spkern(Kernpart): A kernel object, where all the hard work is done by sympy. :param k: the covariance function - :type k: a positive definite sympy function of x1, z1, x2, z2... + :type k: a positive definite sympy function of x_0, z_0, x_1, z_1, x_2, z_2... To construct a new sympy kernel, you'll need to define: - a kernel function using a sympy object. Ensure that the kernel is of the form k(x,z). - that's it! we'll extract the variables from the function k. Note: - - to handle multiple inputs, call them x1, z1, etc - - to handle multpile correlated outputs, you'll need to define each covariance function and 'cross' variance function. TODO + - to handle multiple inputs, call them x_0, z_0, etc + - to handle multiple correlated outputs, you'll need to add parameters with an index, such as lengthscale_i and lengthscale_j. """ - def __init__(self,input_dim,k,name=None,param=None): + def __init__(self, input_dim, k=None, output_dim=1, name=None, param=None): if name is None: self.name='sympykern' else: self.name = name + if k is None: + raise ValueError, "You must provide an argument for the covariance function."
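+ # record the sympy expression and sort its free symbols into inputs (x_*, z_*) and parameters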
self._sp_k = k sp_vars = [e for e in k.atoms() if e.is_Symbol] - self._sp_x= sorted([e for e in sp_vars if e.name[0]=='x'],key=lambda x:int(x.name[1:])) - self._sp_z= sorted([e for e in sp_vars if e.name[0]=='z'],key=lambda z:int(z.name[1:])) - assert all([x.name=='x%i'%i for i,x in enumerate(self._sp_x)]) - assert all([z.name=='z%i'%i for i,z in enumerate(self._sp_z)]) + self._sp_x= sorted([e for e in sp_vars if e.name[0:2]=='x_'],key=lambda x:int(x.name[2:])) + self._sp_z= sorted([e for e in sp_vars if e.name[0:2]=='z_'],key=lambda z:int(z.name[2:])) + # Check that variable names make sense. + assert all([x.name=='x_%i'%i for i,x in enumerate(self._sp_x)]) + assert all([z.name=='z_%i'%i for i,z in enumerate(self._sp_z)]) assert len(self._sp_x)==len(self._sp_z) self.input_dim = len(self._sp_x) + if output_dim > 1: + self.input_dim += 1 + assert self.input_dim == input_dim - self._sp_theta = sorted([e for e in sp_vars if not (e.name[0]=='x' or e.name[0]=='z')],key=lambda e:e.name) - self.num_params = len(self._sp_theta) + self.output_dim = output_dim + # extract parameter names + thetas = sorted([e for e in sp_vars if not (e.name[0:2]=='x_' or e.name[0:2]=='z_')],key=lambda e:e.name) - #deal with param - if param is None: - param = np.ones(self.num_params) - assert param.size==self.num_params - self._set_params(param) + + # Look for parameters with index. + if self.output_dim>1: + self._sp_theta_i = sorted([e for e in thetas if (e.name[-2:]=='_i')], key=lambda e:e.name) + self._sp_theta_j = sorted([e for e in thetas if (e.name[-2:]=='_j')], key=lambda e:e.name) + # Make sure parameter appears with both indices! + assert len(self._sp_theta_i)==len(self._sp_theta_j) + assert all([theta_i.name[:-2]==theta_j.name[:-2] for theta_i, theta_j in zip(self._sp_theta_i, self._sp_theta_j)]) + + # Extract names of shared parameters + self._sp_theta = [theta for theta in thetas if theta not in self._sp_theta_i and theta not in self._sp_theta_j] + + self.num_split_params = len(self._sp_theta_i) + self._split_theta_names = ["%s"%theta.name[:-2] for theta in self._sp_theta_i] + for theta in self._split_theta_names: + setattr(self, theta, np.ones(self.output_dim)) + + self.num_shared_params = len(self._sp_theta) + self.num_params = self.num_shared_params+self.num_split_params*self.output_dim + + else: + self.num_split_params = 0 + self._split_theta_names = [] + self._sp_theta = thetas + self.num_shared_params = len(self._sp_theta) + self.num_params = self.num_shared_params + + for theta in self._sp_theta: + val = 1.0 + if param is not None: + if param.has_key(theta): + val = param[theta] + setattr(self, theta.name, val) + #deal with param + self._set_params(self._get_params()) #Differentiate! 
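+ # the symbolic derivatives computed below are compiled into C by _gen_code()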
self._sp_dk_dtheta = [sp.diff(k,theta).simplify() for theta in self._sp_theta] + if self.output_dim > 1: + self._sp_dk_dtheta_i = [sp.diff(k,theta).simplify() for theta in self._sp_theta_i] + self._sp_dk_dx = [sp.diff(k,xi).simplify() for xi in self._sp_x] - #self._sp_dk_dz = [sp.diff(k,zi) for zi in self._sp_z] - #self.compute_psi_stats() + if False: + self.compute_psi_stats() + self._gen_code() - self.weave_kwargs = {\ - 'support_code':self._function_code,\ - 'include_dirs':[tempfile.gettempdir(), os.path.join(current_dir,'parts/')],\ - 'headers':['"sympy_helpers.h"'],\ - 'sources':[os.path.join(current_dir,"parts/sympy_helpers.cpp")],\ - #'extra_compile_args':['-ftree-vectorize', '-mssse3', '-ftree-vectorizer-verbose=5'],\ - 'extra_compile_args':[],\ - 'extra_link_args':['-lgomp'],\ + if False: + extra_compile_args = ['-ftree-vectorize', '-mssse3', '-ftree-vectorizer-verbose=5'] + else: + extra_compile_args = [] + + self.weave_kwargs = { + 'support_code':self._function_code, + 'include_dirs':[tempfile.gettempdir(), os.path.join(current_dir,'parts/')], + 'headers':['"sympy_helpers.h"'], + 'sources':[os.path.join(current_dir,"parts/sympy_helpers.cpp")], + 'extra_compile_args':extra_compile_args, + 'extra_link_args':['-lgomp'], 'verbose':True} def __add__(self,other): return spkern(self._sp_k+other._sp_k) + def _gen_code(self): + #generate c functions from sympy objects + argument_sequence = self._sp_x+self._sp_z+self._sp_theta + code_list = [('k',self._sp_k)] + # gradients with respect to covariance input + code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)] + # gradient with respect to parameters + code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)] + # gradient with respect to multiple output parameters + if self.output_dim > 1: + argument_sequence += self._sp_theta_i + self._sp_theta_j + code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)] + (foo_c,self._function_code), (foo_h,self._function_header) = \ + codegen(code_list, "C",'foobar',argument_sequence=argument_sequence) + #put the header file where we can find it + f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w') + f.write(self._function_header) + f.close() + + # Substitute any known derivatives which sympy doesn't compute + self._function_code = re.sub('DiracDelta\(.+?,.+?\)','0.0',self._function_code) + + # This is the basic argument construction for the C code. 
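+ # each symbol x_k / z_k is read from column k of the row-major X and Z buffers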
+        arg_list = (["X[i*input_dim+%s]"%x.name[2:] for x in self._sp_x]
+                    + ["Z[j*input_dim+%s]"%z.name[2:] for z in self._sp_z])
+        if self.output_dim>1:
+            reverse_arg_list = list(arg_list)
+            reverse_arg_list.reverse()
+
+        param_arg_list = [shared_params.name for shared_params in self._sp_theta]
+        arg_list += param_arg_list
+
+        precompute_list = []
+        if self.output_dim > 1:
+            reverse_arg_list += list(param_arg_list)
+            split_param_arg_list = ["%s[%s]"%(theta.name[:-2],index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]
+            split_param_reverse_arg_list = ["%s[%s]"%(theta.name[:-2],index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]
+            arg_list += split_param_arg_list
+            reverse_arg_list += split_param_reverse_arg_list
+            precompute_list += [' '*16+"int %s=(int)%s[%s*input_dim+output_dim];"%(index, var, index2) for index, var, index2 in zip(['ii', 'jj'], ['X', 'Z'], ['i', 'j'])]
+            reverse_arg_string = ", ".join(reverse_arg_list)
+        arg_string = ", ".join(arg_list)
+        precompute_string = "\n".join(precompute_list)
+        # Diagonal case: the second input coincides with the first.
+        diag_arg_string = arg_string.replace('Z[j','X[i')
+        diag_precompute_string = precompute_string.replace('Z[j','X[i')
+
+        # Here's the code to do the looping for K
+        self._K_code =\
+        """
+        int i;
+        int j;
+        int N = target_array->dimensions[0];
+        int num_inducing = target_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
+        //#pragma omp parallel for private(j)
+        for (i=0;i<N;i++){
+            for (j=0;j<num_inducing;j++){
+%s
+                target[i*num_inducing+j] = k(%s);
+            }
+        }
+        """%(precompute_string,arg_string)
+        # The X-only version (Z==X) simply reads the second argument out of X as well.
+        self._K_code_X = self._K_code.replace('Z[','X[')
+
+        self._Kdiag_code =\
+        """
+        int i;
+        int N = target_array->dimensions[0];
+        int input_dim = X_array->dimensions[1];
+        //#pragma omp parallel for
+        for (i=0;i<N;i++){
+%s
+            target[i] = k(%s);
+        }
+        """%(diag_precompute_string,diag_arg_string)
+
+        # Code for the gradients with respect to the parameters.
+        func_list = []
+        if self.output_dim>1:
+            func_list += [' '*16 + "int %s=(int)%s[%s*input_dim+output_dim];"%(index, var, index2) for index, var, index2 in zip(['ii', 'jj'], ['X', 'Z'], ['i', 'j'])]
+            func_list += [' '*16 + 'target[%i+ii] += partial[i*num_inducing+j]*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]
+            func_list += [' '*16 + 'target[%i+jj] += partial[i*num_inducing+j]*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]
+        func_list += ([' '*16 + 'target[%i] += partial[i*num_inducing+j]*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])
+        func_string = '\n'.join(func_list)
+
+        self._dK_dtheta_code =\
+        """
+        int i;
+        int j;
+        int N = partial_array->dimensions[0];
+        int num_inducing = partial_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
+        //#pragma omp parallel for private(j)
+        for (i=0;i<N;i++){
+            for (j=0;j<num_inducing;j++){
+%s
+            }
+        }
+        """%(func_string)
+        self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[','X[')
+
+        diag_func_string = func_string.replace('Z[j','X[i').replace('partial[i*num_inducing+j]','partial[i]')
+        self._dKdiag_dtheta_code =\
+        """
+        int i;
+        int N = partial_array->dimensions[0];
+        int input_dim = X_array->dimensions[1];
+        for (i=0;i<N;i++){
+%s
+        }
+        """%(diag_func_string)
+
+        # Code for the gradients with respect to X.
+        gradX_func_list = [' '*16 + 'target[i*input_dim+%i] += partial[i*num_inducing+j]*dk_d%s(%s);'%(q, x.name, arg_string) for q,x in enumerate(self._sp_x)]
+        gradX_func_string = '\n'.join(gradX_func_list)
+
+        self._dK_dX_code =\
+        """
+        int i;
+        int j;
+        int N = partial_array->dimensions[0];
+        int num_inducing = partial_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
+        //#pragma omp parallel for private(j)
+        for (i=0;i<N;i++){
+            for (j=0;j<num_inducing;j++){
+%s
+%s
+            }
+        }
+        """%(precompute_string,gradX_func_string)
+        self._dK_dX_code_X = self._dK_dX_code.replace('Z[','X[')
+
+        self._dKdiag_dX_code =\
+        """
+        int N = partial_array->dimensions[0];
+        int input_dim = X_array->dimensions[1];
+        for (int i=0;i<N;i++){
+%s
+        }
+        """%(gradX_func_string.replace('Z[j','X[i').replace('partial[i*num_inducing+j]','partial[i]'))
+
+    def _get_arg_names(self, Z=None, partial=None):
+        arg_names = ['X', 'target']
+        if Z is not None:
+            arg_names += ['Z']
+        if partial is not None:
+            arg_names += ['partial']
+        arg_names += [theta.name for theta in self._sp_theta]
+        if self.output_dim>1:
+            arg_names += self._split_theta_names
+            arg_names += ['output_dim']
+        return arg_names
+
+    def _weave_inline(self, code, X, target, Z=None, partial=None):
+        output_dim = self.output_dim
+        # Pull the shared parameter values into the local frame so weave can see them.
+        for shared_params in self._sp_theta:
+            locals()[shared_params.name] = getattr(self, shared_params.name)
+
+        # The split parameters need extracting first.
+        for split_params in self._split_theta_names:
+            locals()[split_params] = getattr(self, split_params)
+        arg_names = self._get_arg_names(Z, partial)
+        weave.inline(code=code, arg_names=arg_names, **self.weave_kwargs)
+
+    def K(self,X,Z,target):
+        if Z is None:
+            self._weave_inline(self._K_code_X, X, target)
+        else:
+            self._weave_inline(self._K_code, X, target, Z)
+
+    def Kdiag(self,X,target):
+        self._weave_inline(self._Kdiag_code, X, target)
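+    # Note: as with the other Kernpart implementations these methods fill a
+    # preallocated target array in place rather than returning one, e.g. (sketch):
+    #     target = np.zeros((X.shape[0], Z.shape[0]))
+    #     part.K(X, Z, target)
+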
+    def dK_dtheta(self,partial,X,Z,target):
+        if Z is None:
+            self._weave_inline(self._dK_dtheta_code_X, X, target, Z, partial)
+        else:
+            self._weave_inline(self._dK_dtheta_code, X, target, Z, partial)
+
+    def dKdiag_dtheta(self,partial,X,target):
+        self._weave_inline(self._dKdiag_dtheta_code, X, target, Z=None, partial=partial)
+
+    def dK_dX(self,partial,X,Z,target):
+        if Z is None:
+            self._weave_inline(self._dK_dX_code_X, X, target, Z, partial)
+        else:
+            self._weave_inline(self._dK_dX_code, X, target, Z, partial)
+
+    def dKdiag_dX(self,partial,X,target):
+        self._weave_inline(self._dKdiag_dX_code, X, target, Z=None, partial=partial)
+
     def compute_psi_stats(self):
         #define some normal distributions
-        mus = [sp.var('mu%i'%i,real=True) for i in range(self.input_dim)]
-        Ss = [sp.var('S%i'%i,positive=True) for i in range(self.input_dim)]
+        mus = [sp.var('mu_%i'%i,real=True) for i in range(self.input_dim)]
+        Ss = [sp.var('S_%i'%i,positive=True) for i in range(self.input_dim)]
         normals = [(2*sp.pi*Si)**(-0.5)*sp.exp(-0.5*(xi-mui)**2/Si) for xi, mui, Si in zip(self._sp_x, mus, Ss)]

         #do some integration!
@@ -99,188 +370,29 @@ class spkern(Kernpart):
         self._sp_psi2 = self._sp_psi2.simplify()

-    def _gen_code(self):
-        #generate c functions from sympy objects
-        (foo_c,self._function_code),(foo_h,self._function_header) = \
-        codegen([('k',self._sp_k)] \
-                + [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\
-                #+ [('dk_d%s'%z.name,dz) for z,dz in zip(self._sp_z,self._sp_dk_dz)]\
-                + [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\
-                ,"C",'foobar',argument_sequence=self._sp_x+self._sp_z+self._sp_theta)
-        #put the header file where we can find it
-        f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')
-        f.write(self._function_header)
-        f.close()
+    def _set_params(self,param):
+        assert param.size == (self.num_params)
+        for i, shared_params in enumerate(self._sp_theta):
+            setattr(self, shared_params.name, param[i])
+
+        if self.output_dim>1:
+            for i, split_params in enumerate(self._split_theta_names):
+                start = self.num_shared_params + i*self.output_dim
+                end = self.num_shared_params + (i+1)*self.output_dim
+                setattr(self, split_params, param[start:end])

-        # Substitute any known derivatives which sympy doesn't compute
-        self._function_code = re.sub('DiracDelta\(.+?,.+?\)','0.0',self._function_code)
-
-        # Here's the code to do the looping for K
-        arglist = ", ".join(["X[i*input_dim+%s]"%x.name[1:] for x in self._sp_x]
-                            + ["Z[j*input_dim+%s]"%z.name[1:] for z in self._sp_z]
-                            + ["param[%i]"%i for i in range(self.num_params)])
-
-        self._K_code =\
-        """
-        int i;
-        int j;
-        int N = target_array->dimensions[0];
-        int num_inducing = target_array->dimensions[1];
-        int input_dim = X_array->dimensions[1];
-        //#pragma omp parallel for private(j)
-        for (i=0;i<N;i++){
-            for (j=0;j<num_inducing;j++){
-                target[i*num_inducing+j] = k(%s);
-            }
-        }
-        """%arglist
-
-        self._Kdiag_code =\
-        """
-        int i;
-        int N = target_array->dimensions[0];
-        int input_dim = X_array->dimensions[1];
-        //#pragma omp parallel for
-        for (i=0;i<N;i++){
-            target[i] = k(%s);
-        }
-        """%arglist.replace('Z[j','X[i')
-
-        self._dK_dtheta_code =\
-        """
-        int i;
-        int j;
-        int N = partial_array->dimensions[0];
-        int num_inducing = partial_array->dimensions[1];
-        int input_dim = X_array->dimensions[1];
-        //#pragma omp parallel for private(j)
-        for (i=0;i<N;i++){
-            for (j=0;j<num_inducing;j++){
-%s
-            }
-        }
-        """%('\n'.join([' '*16 + 'target[%i] += partial[i*num_inducing+j]*dk_d%s(%s);'%(i,theta.name,arglist) for i,theta in enumerate(self._sp_theta)]))
-
-        self._dKdiag_dtheta_code =\
-        """
-        int i;
-        int N = partial_array->dimensions[0];
-        int input_dim = X_array->dimensions[1];
-        for (i=0;i<N;i++){
-%s
-        }
-        """%('\n'.join([' '*16 + 'target[%i] += partial[i]*dk_d%s(%s);'%(i,theta.name,arglist.replace('Z[j','X[i')) for i,theta in enumerate(self._sp_theta)]))
-
-        self._dK_dX_code =\
-        """
-        int i;
-        int j;
-        int N = partial_array->dimensions[0];
-        int num_inducing = partial_array->dimensions[1];
-        int input_dim = X_array->dimensions[1];
-        //#pragma omp parallel for private(j)
-        for (i=0;i<N;i++){
-            for (j=0;j<num_inducing;j++){
-%s
-            }
-        }
-        """%('\n'.join([' '*16 + 'target[i*input_dim+%i] += partial[i*num_inducing+j]*dk_d%s(%s);'%(q,x.name,arglist) for q,x in enumerate(self._sp_x)]))
-
-        self._dKdiag_dX_code =\
-        """
-        int N = partial_array->dimensions[0];
-        int input_dim = X_array->dimensions[1];
-        for (int i=0;i<N;i++){
-%s
-        }
-        """%('\n'.join([' '*16 + 'target[i*input_dim+%i] += partial[i]*dk_d%s(%s);'%(q,x.name,arglist.replace('Z[j','X[i')) for q,x in enumerate(self._sp_x)]))
+    def _get_params(self):
+        params = np.hstack([getattr(self, theta.name) for theta in self._sp_theta])
+        if self.output_dim>1:
+            for split_params in self._split_theta_names:
+                params = np.hstack((params, getattr(self, split_params).flatten()))
+        return params

     def _get_param_names(self):
-        return [x.name for x in self._sp_theta]
+        if self.output_dim>1:
+            return [x.name for x in self._sp_theta] + [x.name[:-2] + str(i) for x in self._sp_theta_i for i in range(self.output_dim)]
+        else:
+            return [x.name for x in self._sp_theta]
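To see the pieces above in one place, here is a minimal usage sketch; it assumes the `rbf_sympy` and `eq_sympy` constructors registered in `GPy.kern`, exactly as the new unit tests later in this patch call them.

```python
import GPy

# rbf_sympy builds the RBF covariance symbolically; eq_sympy builds a
# multi-output EQ covariance whose input at output_ind=4 holds the
# integer output index.
k_rbf = GPy.kern.rbf_sympy(5)
k_eq = GPy.kern.eq_sympy(5, 3, output_ind=4)

# kern_test compares the weave-generated gradients against numerical ones.
assert GPy.kern.kern_test(k_rbf, verbose=True)
assert GPy.kern.kern_test(k_eq, verbose=True)
```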
diff --git a/GPy/likelihoods/ep.py b/GPy/likelihoods/ep.py
index d242e583..4fedd66b 100644
--- a/GPy/likelihoods/ep.py
+++ b/GPy/likelihoods/ep.py
@@ -18,7 +18,7 @@ class EP(likelihood):
         self.data = data
         self.num_data, self.output_dim = self.data.shape
         self.is_heteroscedastic = True
-        self.Nparams = 0
+        self.num_params = 0
         self._transf_data = self.noise_model._preprocess_values(data)

         #Initial values - Likelihood approximation parameters:
diff --git a/GPy/likelihoods/ep_mixed_noise.py b/GPy/likelihoods/ep_mixed_noise.py
index ffc8cb51..f5452512 100644
--- a/GPy/likelihoods/ep_mixed_noise.py
+++ b/GPy/likelihoods/ep_mixed_noise.py
@@ -31,7 +31,7 @@ class EP_Mixed_Noise(likelihood):
         self.data = np.vstack(data_list)
         self.N, self.output_dim = self.data.shape
         self.is_heteroscedastic = True
-        self.Nparams = 0 #FIXME
+        self.num_params = 0 #FIXME
         self._transf_data = np.vstack([noise_model._preprocess_values(data) for noise_model,data in zip(noise_model_list,data_list)])

         #TODO non-gaussian index
diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index 8f66d074..da13ddb0 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -15,7 +15,7 @@ class Gaussian(likelihood):
     """
     def __init__(self, data, variance=1., normalize=False):
         self.is_heteroscedastic = False
-        self.Nparams = 1
+        self.num_params = 1
         self.Z = 0. # a correction factor which accounts for the approximation made
         N, self.output_dim = data.shape
diff --git a/GPy/likelihoods/gaussian_mixed_noise.py b/GPy/likelihoods/gaussian_mixed_noise.py
index 4df01ec2..696867c0 100644
--- a/GPy/likelihoods/gaussian_mixed_noise.py
+++ b/GPy/likelihoods/gaussian_mixed_noise.py
@@ -23,14 +23,14 @@ class Gaussian_Mixed_Noise(likelihood):
     :type normalize: False|True
     """
     def __init__(self, data_list, noise_params=None, normalize=True):
-        self.Nparams = len(data_list)
+        self.num_params = len(data_list)
         self.n_list = [data.size for data in data_list]
-        self.index = np.vstack([np.repeat(i,n)[:,None] for i,n in zip(range(self.Nparams),self.n_list)])
+        self.index = np.vstack([np.repeat(i,n)[:,None] for i,n in zip(range(self.num_params),self.n_list)])

         if noise_params is None:
-            noise_params = [1.] * self.Nparams
+            noise_params = [1.] * self.num_params
         else:
-            assert self.Nparams == len(noise_params), 'Number of noise parameters does not match the number of noise models.'
+            assert self.num_params == len(noise_params), 'Number of noise parameters does not match the number of noise models.'

         self.noise_model_list = [Gaussian(Y,variance=v,normalize = normalize) for Y,v in zip(data_list,noise_params)]
         self.n_params = [noise_model._get_params().size for noise_model in self.noise_model_list]
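The `Nparams` to `num_params` rename is mechanical, but the mixed-noise case shows why the count matters: one variance parameter per data list. A minimal sketch, assuming only the constructor signature shown in the hunk above:

```python
import numpy as np
from GPy.likelihoods import Gaussian_Mixed_Noise

# Two outputs observed with different noise levels: 40 points of the
# first, 25 of the second; each list entry gets its own Gaussian model.
Y1 = np.random.randn(40, 1)
Y2 = np.random.randn(25, 1) * 3.0
lik = Gaussian_Mixed_Noise([Y1, Y2])

assert lik.num_params == 2          # one variance per noise model
assert lik.index.shape == (65, 1)   # each row tagged with its noise model
```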
diff --git a/GPy/likelihoods/noise_models/binomial_noise.py b/GPy/likelihoods/noise_models/binomial_noise.py
index ab1f237a..c0bb8be4 100644
--- a/GPy/likelihoods/noise_models/binomial_noise.py
+++ b/GPy/likelihoods/noise_models/binomial_noise.py
@@ -117,3 +117,16 @@ class Binomial(NoiseDistribution):

     def _d2variance_dgp2(self,gp):
         return self.gp_link.d2transf_df2(gp)*(1. - 2.*self.gp_link.transf(gp)) - 2*self.gp_link.dtransf_df(gp)**2
+
+
+    def samples(self, gp):
+        """
+        Returns a set of samples of observations based on a given value of the latent variable.
+
+        :param gp: latent variable
+        """
+        orig_shape = gp.shape
+        gp = gp.flatten()
+        Ysim = np.array([np.random.binomial(1,self.gp_link.transf(gpj),size=1) for gpj in gp])
+        return Ysim.reshape(orig_shape)
diff --git a/GPy/likelihoods/noise_models/noise_distributions.py b/GPy/likelihoods/noise_models/noise_distributions.py
index 67fbbe72..913c2b9d 100644
--- a/GPy/likelihoods/noise_models/noise_distributions.py
+++ b/GPy/likelihoods/noise_models/noise_distributions.py
@@ -413,3 +413,13 @@ class NoiseDistribution(object):
         q1 = np.vstack(q1)
         q3 = np.vstack(q3)
         return pred_mean, pred_var, q1, q3
+
+
+    def samples(self, gp):
+        """
+        Returns a set of samples of observations based on a given value of the latent variable.
+
+        :param gp: latent variable
+        """
+        raise NotImplementedError
+
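The new `samples` hook turns latent function values into simulated observations through the noise model's link function. A small numpy-only sketch of what the Binomial case does, with a logistic squashing standing in for `gp_link.transf`:

```python
import numpy as np

# Latent function values on a grid; squash them to probabilities and draw
# Bernoulli observations, mirroring Binomial.samples element by element.
gp = np.linspace(-3.0, 3.0, 7)[:, None]
p = 1.0 / (1.0 + np.exp(-gp))     # stand-in for self.gp_link.transf(gp)
Ysim = np.random.binomial(1, p)   # vectorised form of the list comprehension
print Ysim.shape                  # (7, 1), matching gp's shape
```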
diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py
index be191e9b..1435028f 100644
--- a/GPy/models/mrd.py
+++ b/GPy/models/mrd.py
@@ -211,8 +211,8 @@ class MRD(Model):
 #         g.Z = Z.reshape(self.num_inducing, self.input_dim)
 #
 #     def _set_kern_params(self, g, p):
-#         g.kern._set_params(p[:g.kern.Nparam])
-#         g.likelihood._set_params(p[g.kern.Nparam:])
+#         g.kern._set_params(p[:g.kern.num_params])
+#         g.likelihood._set_params(p[g.kern.num_params:])

     def _set_params(self, x):
         start = 0; end = self.NQ
diff --git a/GPy/models/svigp_regression.py b/GPy/models/svigp_regression.py
index 4d22c619..e826bf35 100644
--- a/GPy/models/svigp_regression.py
+++ b/GPy/models/svigp_regression.py
@@ -25,7 +25,7 @@ class SVIGPRegression(SVIGP):

     """

-    def __init__(self, X, Y, kernel=None, Z=None, num_inducing=10, q_u=None, batchsize=10):
+    def __init__(self, X, Y, kernel=None, Z=None, num_inducing=10, q_u=None, batchsize=10, normalize_Y=False):
         # kern defaults to rbf (plus white for stability)
         if kernel is None:
             kernel = kern.rbf(X.shape[1], variance=1., lengthscale=4.) + kern.white(X.shape[1], 1e-3)
@@ -38,7 +38,7 @@ class SVIGPRegression(SVIGP):
             assert Z.shape[1] == X.shape[1]

         # likelihood defaults to Gaussian
-        likelihood = likelihoods.Gaussian(Y, normalize=False)
+        likelihood = likelihoods.Gaussian(Y, normalize=normalize_Y)

         SVIGP.__init__(self, X, likelihood, kernel, Z, q_u=q_u, batchsize=batchsize)
         self.load_batch()
diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py
index 87d4a20e..f64dac2b 100644
--- a/GPy/testing/kernel_tests.py
+++ b/GPy/testing/kernel_tests.py
@@ -7,6 +7,13 @@ import GPy

 verbose = False

+try:
+    import sympy
+    SYMPY_AVAILABLE = True
+except ImportError:
+    SYMPY_AVAILABLE = False
+
+
 class KernelTests(unittest.TestCase):
     def test_kerneltie(self):
         K = GPy.kern.rbf(5, ARD=True)
@@ -22,7 +29,16 @@ class KernelTests(unittest.TestCase):
         self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))

     def test_rbf_sympykernel(self):
-        kern = GPy.kern.rbf_sympy(5)
+        if SYMPY_AVAILABLE:
+            kern = GPy.kern.rbf_sympy(5)
+            self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))
+
+    def test_eq_sympykernel(self):
+        if SYMPY_AVAILABLE:
+            kern = GPy.kern.eq_sympy(5, 3, output_ind=4)
+            self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))
+
+    def test_sinckernel(self):
+        if SYMPY_AVAILABLE:
+            kern = GPy.kern.sinc(5)
+            self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))
-        self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))

     def test_rbf_invkernel(self):
diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py
index 56ac3b47..7676edf9 100644
--- a/GPy/util/datasets.py
+++ b/GPy/util/datasets.py
@@ -491,11 +491,11 @@ def ripley_synth(data_set='ripley_prnn_data'):
 def osu_run1(data_set='osu_run1', sample_every=4):
     if not data_available(data_set):
         download_data(data_set)
-    zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'sprintTXT.ZIP'), 'r')
+    zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
     path = os.path.join(data_path, data_set)
     for name in zip.namelist():
         zip.extract(name, path)
-    Y, connect = GPy.util.mocap.load_text_data('Aug210107', path)
+    Y, connect = GPy.util.mocap.load_text_data('Aug210106', path)
     Y = Y[0:-1:sample_every, :]
     return data_details_return({'Y': Y, 'connect' : connect}, data_set)
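An alternative to guarding each test body with `if SYMPY_AVAILABLE:` (which makes the test pass vacuously when sympy is missing) is to skip it explicitly. A sketch using only the standard library, reusing the module-level names from the hunk above:

```python
import unittest

class KernelTests(unittest.TestCase):
    @unittest.skipUnless(SYMPY_AVAILABLE, "sympy not installed")
    def test_rbf_sympykernel(self):
        kern = GPy.kern.rbf_sympy(5)
        self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))
```

The skipped test is then reported as a skip rather than a silent pass, which keeps the test count honest.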
diff --git a/GPy/util/symbolic.py b/GPy/util/symbolic.py
index f4f5fda0..8b368a77 100644
--- a/GPy/util/symbolic.py
+++ b/GPy/util/symbolic.py
@@ -1,32 +1,91 @@
-from sympy import Function, S, oo, I, cos, sin
+from sympy import Function, S, oo, I, cos, sin, asin, log, erf, pi, exp, sqrt
+from sympy.core.function import ArgumentIndexError

+class ln_diff_erf(Function):
+    nargs = 2
+
+    def fdiff(self, argindex=2):
+        if argindex == 2:
+            x0, x1 = self.args
+            return -2*exp(-x1**2)/(sqrt(pi)*(erf(x0)-erf(x1)))
+        elif argindex == 1:
+            x0, x1 = self.args
+            return 2*exp(-x0**2)/(sqrt(pi)*(erf(x0)-erf(x1)))
+        else:
+            raise ArgumentIndexError(self, argindex)
+
+    @classmethod
+    def eval(cls, x0, x1):
+        if x0.is_Number and x1.is_Number:
+            return log(erf(x0)-erf(x1))
+
+class sim_h(Function):
+    nargs = 5
+
+    @classmethod
+    def eval(cls, t, tprime, d_i, d_j, l):
+        return exp((d_j/2*l)**2)/(d_i+d_j)*(exp(-d_j*(tprime - t))*(erf((tprime-t)/l - d_j/2*l) + erf(t/l + d_j/2*l)) - exp(-(d_j*tprime + d_i*t))*(erf(tprime/l - d_j/2*l) + erf(d_j/2*l)))
+
+class erfc(Function):
+    nargs = 1
+
+    @classmethod
+    def eval(cls, arg):
+        return 1-erf(arg)
+
+class erfcx(Function):
+    nargs = 1
+
+    @classmethod
+    def eval(cls, arg):
+        return erfc(arg)*exp(arg*arg)
+
 class sinc_grad(Function):
     nargs = 1

     def fdiff(self, argindex=1):
-        return ((2-x*x)*sin(self.args[0]) - 2*x*cos(x))/(x*x*x)
+        if argindex==1:
+            # Strictly speaking this should be computed separately, as it won't work when x=0. See http://calculus.subwiki.org/wiki/Sinc_function
+            x = self.args[0]
+            return ((2-x*x)*sin(x) - 2*x*cos(x))/(x*x*x)
+        else:
+            raise ArgumentIndexError(self, argindex)
+
     @classmethod
     def eval(cls, x):
-        if x is S.Zero:
-            return S.Zero
-        else:
-            return (x*cos(x) - sin(x))/(x*x)
+        if x.is_Number:
+            if x is S.NaN:
+                return S.NaN
+            elif x is S.Zero:
+                return S.Zero
+            else:
+                return (x*cos(x) - sin(x))/(x*x)

 class sinc(Function):
     nargs = 1

     def fdiff(self, argindex=1):
-        return sinc_grad(self.args[0])
+        if argindex==1:
+            return sinc_grad(self.args[0])
+        else:
+            raise ArgumentIndexError(self, argindex)
+
     @classmethod
-    def eval(cls, x):
-        if x is S.Zero:
-            return S.One
-        else:
-            return sin(x)/x
-
+    def eval(cls, arg):
+        if arg.is_Number:
+            if arg is S.NaN:
+                return S.NaN
+            elif arg is S.Zero:
+                return S.One
+            else:
+                return sin(arg)/arg
+
+        if arg.func is asin:
+            # sin(asin(x))/asin(x) simplifies to x/asin(x).
+            x = arg.args[0]
+            return x / arg
+
     def _eval_is_real(self):
         return self.args[0].is_real
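Finally, a quick interactive check of the new sinc helpers; a sketch assuming GPy is importable so that this module is reachable as `GPy.util.symbolic`:

```python
import sympy as sp
from GPy.util.symbolic import sinc, sinc_grad

x = sp.Symbol('x', real=True)

print sinc(sp.S.Zero)     # 1: the removable singularity is handled in eval
print sinc(x).diff(x)     # sinc_grad(x), supplied by fdiff

# The closed form (x*cos(x) - sin(x))/x**2 should agree with sympy's own
# derivative of sin(x)/x at a test point.
closed = sinc_grad(sp.Float(0.5))
direct = sp.diff(sp.sin(x)/x, x).subs(x, sp.Float(0.5))
print sp.simplify(closed - direct)   # 0, up to float round-off
```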