diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py
index 71ad663d..8250caca 100644
--- a/GPy/examples/warped_GP_demo.py
+++ b/GPy/examples/warped_GP_demo.py
@@ -7,44 +7,43 @@ import scipy as sp
 import pdb, sys, pickle
 import matplotlib.pylab as plt
 import GPy
-np.random.seed(1)
+np.random.seed(2)
 
-N = 100
+N = 120
 # sample inputs and outputs
 X = np.random.uniform(-np.pi,np.pi,(N,1))
 Y = np.sin(X)+np.random.randn(N,1)*0.05
-# Y += np.abs(Y.min()) + 0.5
-Z = np.exp(3.0*Y)#Y**(1/3.0)
-# rescaling targets?
+Y += np.abs(Y.min()) + 0.5
+Z = np.exp(Y)#Y**(1/3.0)
 Zmax = Z.max()
 Zmin = Z.min()
 Z = (Z-Zmin)/(Zmax-Zmin) - 0.5
+train = range(X.shape[0])[:100]
+test = range(X.shape[0])[100:]
 
-m = GPy.models.warpedGP(X, Z, warping_terms = 2)
-m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|noise|bias)')
-# m.unconstrain('tanh_d')
-# m.constrain_fixed('tanh_d', 1.0)
-
-# lognormal = GPy.priors.log_Gaussian(1.0, 2.0) # 1,2
-# gaussian = GPy.priors.Gaussian(0, 10) # 0, 10
-# m.set_prior('tanh_c', gaussian)
-# m.set_prior('(tanh_b|tanh_a)', lognormal)
-
+kernel = GPy.kern.rbf(1) + GPy.kern.bias(1)
+m = GPy.models.warpedGP(X[train], Z[train], kernel=kernel, warping_terms = 2)
+m.constrain_positive('(tanh_a|tanh_b|rbf|noise|bias)')
+m.constrain_fixed('tanh_d', 1.0)
 m.randomize()
 plt.figure()
 plt.xlabel('predicted f(Z)')
 plt.ylabel('actual f(Z)')
-plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'before training')
-# m.optimize(messages = True)
-m.optimize_restarts(4, parallel = True)
-plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'after training')
+plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'before training')
+m.optimize(messages = True)
+# m.optimize_restarts(4, parallel = True, messages = True)
+plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'after training')
 plt.legend(loc = 0)
 m.plot_warping()
 plt.figure()
 plt.title('warped GP fit')
 m.plot()
+m.optimize(messages=1)
+plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Y[test].flatten(), 'x'); plt.title('prediction in unwarped space')
+m.predict_in_warped_space = True
+plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Z[test].flatten(), 'x'); plt.title('prediction in warped space')
 
-m1 = GPy.models.GP_regression(X, Z)
+m1 = GPy.models.GP_regression(X[train], Z[train])
 m1.constrain_positive('(rbf|noise|bias)')
 m1.randomize()
 m1.optimize(messages = True)
diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py
index d08288f9..052f8d8e 100644
--- a/GPy/models/warped_GP.py
+++ b/GPy/models/warped_GP.py
@@ -9,44 +9,52 @@ from ..util.linalg import pdinv
 from ..util.plot import gpplot
 from ..util.warping_functions import *
 from GP_regression import GP_regression
+from GP import GP
+from .. import likelihoods
+from .. import kern
 
+class warpedGP(GP):
+    def __init__(self, X, Y, kernel=None, warping_function = None, warping_terms = 3, normalize_X=False, normalize_Y=False, Xslices=None):
-class warpedGP(GP_regression):
-    def __init__(self, X, Y, warping_function = None, warping_terms = 3, **kwargs):
+        if kernel is None:
+            kernel = kern.rbf(X.shape[1])
 
         if warping_function == None:
             self.warping_function = TanhWarpingFunction_d(warping_terms)
             self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1)
 
-        self.Z = Y.copy()
-        self.N, self.D = Y.shape
-        GP_regression.__init__(self, X, self.transform_data(), **kwargs)
+        self.has_uncertain_inputs = False
+        self.Y_untransformed = Y.copy()
+        self.predict_in_warped_space = False
+        likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y)
+
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X, Xslices=Xslices)
 
     def _set_params(self, x):
         self.warping_params = x[:self.warping_function.num_parameters]
         Y = self.transform_data()
         self.likelihood.set_data(Y)
-        GP_regression._set_params(self, x[self.warping_function.num_parameters:].copy())
+        GP._set_params(self, x[self.warping_function.num_parameters:].copy())
 
     def _get_params(self):
-        return np.hstack((self.warping_params.flatten().copy(), GP_regression._get_params(self).copy()))
+        return np.hstack((self.warping_params.flatten().copy(), GP._get_params(self).copy()))
 
     def _get_param_names(self):
         warping_names = self.warping_function._get_param_names()
-        param_names = GP_regression._get_param_names(self)
+        param_names = GP._get_param_names(self)
         return warping_names + param_names
 
     def transform_data(self):
-        Y = self.warping_function.f(self.Z.copy(), self.warping_params).copy()
+        Y = self.warping_function.f(self.Y_untransformed.copy(), self.warping_params).copy()
         return Y
 
     def log_likelihood(self):
-        ll = GP_regression.log_likelihood(self)
-        jacobian = self.warping_function.fgrad_y(self.Z, self.warping_params)
+        ll = GP.log_likelihood(self)
+        jacobian = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params)
         return ll + np.log(jacobian).sum()
 
     def _log_likelihood_gradients(self):
-        ll_grads = GP_regression._log_likelihood_gradients(self)
+        ll_grads = GP._log_likelihood_gradients(self)
         alpha = np.dot(self.Ki, self.likelihood.Y.flatten())
         warping_grads = self.warping_function_gradients(alpha)
@@ -54,29 +62,22 @@ class warpedGP(GP_regression):
         return np.hstack((warping_grads.flatten(), ll_grads.flatten()))
 
     def warping_function_gradients(self, Kiy):
-        grad_y = self.warping_function.fgrad_y(self.Z, self.warping_params)
-        grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Z, self.warping_params,
+        grad_y = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params)
+        grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, self.warping_params,
                                                                  return_covar_chain = True)
         djac_dpsi = ((1.0/grad_y[:,:, None, None])*grad_y_psi).sum(axis=0).sum(axis=0)
         dquad_dpsi = (Kiy[:,None,None,None] * grad_psi).sum(axis=0).sum(axis=0)
-
         return -dquad_dpsi + djac_dpsi
 
     def plot_warping(self):
-        self.warping_function.plot(self.warping_params, self.Z.min(), self.Z.max())
+        self.warping_function.plot(self.warping_params, self.Y_untransformed.min(), self.Y_untransformed.max())
 
-    def predict(self, X, in_unwarped_space = False, **kwargs):
-        mu, var, _025pm, _975pm = GP_regression.predict(self, X, **kwargs)
+    def _raw_predict(self, *args, **kwargs):
+        mu, var = GP._raw_predict(self, *args, **kwargs)
 
-        # The plot() function calls _set_params() before calling predict()
-        # this is causing the observations to be plotted in the transformed
-        # space (where Y lives), making the plot looks very wrong
-        # if the predictions are made in the untransformed space
-        # (where Z lives). To fix this I included the option below. It's
-        # just a quick fix until I figure out something smarter.
-        if in_unwarped_space:
+        if self.predict_in_warped_space:
             mu = self.warping_function.f_inv(mu, self.warping_params)
-            var = self.warping_function.f_inv(var[:, None], self.warping_params)
+            var = self.warping_function.f_inv(var, self.warping_params)
 
-        return mu, var, _025pm, _975pm
+        return mu, var
diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py
index 89c88dd2..3ea6dcc6 100644
--- a/GPy/util/warping_functions.py
+++ b/GPy/util/warping_functions.py
@@ -185,7 +185,7 @@ class TanhWarpingFunction_d(WarpingFunction):
         return z
 
-    def f_inv(self, y, psi, iterations = 10):
+    def f_inv(self, y, psi, iterations = 30):
         """
         calculate the numerical inverse of f
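
Usage note (not part of the patch): a minimal sketch of the reworked interface, assembled only from the calls exercised in the updated demo above. This is Python 2-era GPy, where range(...) indexes arrays like a list; the seed, data generation, and variable names such as mu_latent/mu_warped are illustrative and simply mirror the demo.

    import numpy as np
    import GPy

    np.random.seed(2)
    X = np.random.uniform(-np.pi, np.pi, (120, 1))
    Y = np.sin(X) + np.random.randn(120, 1) * 0.05
    Y += np.abs(Y.min()) + 0.5
    Z = np.exp(Y)                                   # observed (warped) targets
    Z = (Z - Z.min()) / (Z.max() - Z.min()) - 0.5   # rescale as in the demo
    train, test = range(100), range(100, 120)

    # warped GP on the training split; the tanh warping's linear term is fixed
    kernel = GPy.kern.rbf(1) + GPy.kern.bias(1)
    m = GPy.models.warpedGP(X[train], Z[train], kernel=kernel, warping_terms=2)
    m.constrain_positive('(tanh_a|tanh_b|rbf|noise|bias)')
    m.constrain_fixed('tanh_d', 1.0)
    m.randomize()
    m.optimize(messages=True)

    mu_latent = m.predict(X[test])[0]   # predictions in the unwarped space (default)
    m.predict_in_warped_space = True    # new flag: _raw_predict applies f_inv
    mu_warped = m.predict(X[test])[0]   # predictions mapped back to the observed space

The bump of f_inv's default iterations from 10 to 30 matters for exactly this path: with predict_in_warped_space set, every predictive mean and variance passes through that numerical inverse, so its convergence directly bounds prediction accuracy in the observed space.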