From ea9763865d4fa81c01932e2f762e3b405c39d2b5 Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Thu, 10 Jan 2013 11:58:26 +0000 Subject: [PATCH 1/7] added a term to warping function --- GPy/examples/warped_GP_demo.py | 2 +- GPy/models/warped_GP.py | 14 ++-- GPy/util/warping_functions.py | 115 +++++++++++++++++++++++++++++++++ 3 files changed, 124 insertions(+), 7 deletions(-) diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py index 3b75694a..1fd8b105 100644 --- a/GPy/examples/warped_GP_demo.py +++ b/GPy/examples/warped_GP_demo.py @@ -22,7 +22,7 @@ Zmin = Z.min() Z = (Z-Zmin)/(Zmax-Zmin) - 0.5 m = GPy.models.warpedGP(X, Z, warping_terms = 2) -m.constrain_positive('(tanh_a|tanh_b|rbf|white|bias)') +m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|white|bias)') m.randomize() plt.figure() plt.xlabel('predicted f(Z)') diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py index bf5af21f..00eb94d8 100644 --- a/GPy/models/warped_GP.py +++ b/GPy/models/warped_GP.py @@ -20,10 +20,10 @@ class warpedGP(GP_regression): def __init__(self, X, Y, warping_function = None, warping_terms = 3, **kwargs): if warping_function == None: - self.warping_function = TanhWarpingFunction(warping_terms) - # self.warping_params = np.random.randn(self.warping_function.n_terms, 3) - self.warping_params = np.ones((self.warping_function.n_terms, 3))*0.0 # TODO better init - self.warp_params_shape = (self.warping_function.n_terms, 3) # todo get this from the subclass + self.warping_function = TanhWarpingFunction_d(warping_terms) + self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1) + # self.warping_params = np.ones((self.warping_function.n_terms*3 + 1,)) # TODO better init + # self.warp_params_shape = (self.warping_function.n_terms, 4) # todo get this from the subclass self.Z = Y.copy() self.N, self.D = Y.shape @@ -31,7 +31,7 @@ class warpedGP(GP_regression): GP_regression.__init__(self, X, self.Y, **kwargs) def set_param(self, x): - self.warping_params = x[:self.warping_function.num_parameters].reshape(self.warp_params_shape).copy() + self.warping_params = x[:self.warping_function.num_parameters] self.transform_data() GP_regression.set_param(self, x[self.warping_function.num_parameters:].copy()) @@ -63,16 +63,18 @@ class warpedGP(GP_regression): ll_grads = GP_regression.log_likelihood_gradients(self) alpha = np.dot(self.Ki, self.Y.flatten()) warping_grads = self.warping_function_gradients(alpha) + + warping_grads = np.append(warping_grads[:,:-1].flatten(), warping_grads[0,-1]) return np.hstack((warping_grads.flatten(), ll_grads.flatten())) def warping_function_gradients(self, Kiy): grad_y = self.warping_function.fgrad_y(self.Z, self.warping_params) grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Z, self.warping_params, return_covar_chain = True) - djac_dpsi = ((1.0/grad_y[:,:, None, None])*grad_y_psi).sum(axis=0).sum(axis=0) dquad_dpsi = (Kiy[:,None,None,None] * grad_psi).sum(axis=0).sum(axis=0) + return -dquad_dpsi + djac_dpsi def plot_warping(self): diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 5a6dc6b3..79d8d9d8 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -155,3 +155,118 @@ class TanhWarpingFunction(WarpingFunction): variables = ['a', 'b', 'c'] names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[]) return names + + +class TanhWarpingFunction_d(WarpingFunction): + + def __init__(self,n_terms=3): + """n_terms specifies the number 
of tanh terms to be used""" + self.n_terms = n_terms + self.num_parameters = 3 * self.n_terms + 1 + + def f(self,y,psi): + """transform y with f using parameter vector psi + psi = [[a,b,c]] + f = \sum_{terms} a * tanh(b*(y+c)) + """ + + #1. check that number of params is consistent + # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions' + # assert psi.shape[1] == 4, 'inconsistent parameter dimensions' + mpsi = psi.copy() + d = psi[-1] + mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3) + + #3. transform data + z = d*y.copy() + for i in range(len(mpsi)): + a,b,c = mpsi[i] + z += a*np.tanh(b*(y+c)) + return z + + + def f_inv(self, y, psi, iterations = 10): + """ + calculate the numerical inverse of f + + == input == + iterations: number of N.R. iterations + + """ + + y = y.copy() + z = np.ones_like(y) + + for i in range(iterations): + z -= (self.f(z, psi) - y)/self.fgrad_y(z,psi) + + return z + + + def fgrad_y(self, y, psi, return_precalc = False): + """ + gradient of f w.r.t to y ([N x 1]) + returns: Nx1 vector of derivatives, unless return_precalc is true, + then it also returns the precomputed stuff + """ + + + mpsi = psi.copy() + d = psi[-1] + mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3) + + # vectorized version + + S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T + R = np.tanh(S) + D = 1-R**2 + + GRAD = (d + (mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T + + if return_precalc: + return GRAD, S, R, D + + + return GRAD + + + def fgrad_y_psi(self, y, psi, return_covar_chain = False): + """ + gradient of f w.r.t to y and psi + + returns: NxIx4 tensor of partial derivatives + + """ + + mpsi = psi.copy() + mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3) + + w, s, r, d = self.fgrad_y(y, psi, return_precalc = True) + + gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) + for i in range(len(mpsi)): + a,b,c = mpsi[i] + gradients[:,:,i,0] = (b*(1.0/np.cosh(s[i]))**2).T + gradients[:,:,i,1] = a*(d[i] - 2.0*s[i]*r[i]*(1.0/np.cosh(s[i]))**2).T + gradients[:,:,i,2] = (-2.0*a*(b**2)*r[i]*((1.0/np.cosh(s[i]))**2)).T + gradients[:,:,0,3] = 1.0 + + if return_covar_chain: + covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) + + for i in range(len(mpsi)): + a,b,c = mpsi[i] + covar_grad_chain[:, :, i, 0] = (r[i]).T + covar_grad_chain[:, :, i, 1] = (a*(y + c) * ((1.0/np.cosh(s[i]))**2).T) + covar_grad_chain[:, :, i, 2] = a*b*((1.0/np.cosh(s[i]))**2).T + covar_grad_chain[:, :, 0, 3] = y + + return gradients, covar_grad_chain + + return gradients + + def get_param_names(self): + variables = ['a', 'b', 'c', 'd'] + names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[]) + names.append('warp_tanh_d') + return names From b01dd3d758d778e91e002bc04900a5a5a39bd702 Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Tue, 22 Jan 2013 16:24:08 +0000 Subject: [PATCH 2/7] minor --- GPy/kern/linear.py | 2 +- GPy/models/warped_GP.py | 2 +- GPy/util/warping_functions.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/kern/linear.py b/GPy/kern/linear.py index 7246244e..f02cfb90 100644 --- a/GPy/kern/linear.py +++ b/GPy/kern/linear.py @@ -65,7 +65,7 @@ class linear(kernpart): self._X2cache = X2 self._dot_product = np.dot(X,X2.T) else: - # print "Cache hit!" + #print "Cache hit!" 
pass # TODO: insert debug message here (logging framework) diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py index 00eb94d8..695fd896 100644 --- a/GPy/models/warped_GP.py +++ b/GPy/models/warped_GP.py @@ -91,6 +91,6 @@ class warpedGP(GP_regression): # just a quick fix until I figure out something smarter. if in_unwarped_space: mu = self.warping_function.f_inv(mu, self.warping_params) - var = self.warping_function.f_inv(var, self.warping_params) + var = self.warping_function.f_inv(var[:, None], self.warping_params) return mu, var diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 79d8d9d8..af59dd49 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -81,7 +81,7 @@ class TanhWarpingFunction(WarpingFunction): iterations: number of N.R. iterations """ - + y = y.copy() z = np.ones_like(y) From d82763be3935cca62d507fdab06cf1ac866e3792 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Fusi?= Date: Tue, 22 Jan 2013 17:59:02 +0000 Subject: [PATCH 3/7] fixed _get_param_names --- GPy/util/warping_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index a7c4c282..89c88dd2 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -265,7 +265,7 @@ class TanhWarpingFunction_d(WarpingFunction): return gradients - def get_param_names(self): + def _get_param_names(self): variables = ['a', 'b', 'c', 'd'] names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[]) names.append('warp_tanh_d') From f661b4b64ee24808dea688f0628fa68b62970834 Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Mon, 25 Feb 2013 12:20:20 +0000 Subject: [PATCH 4/7] moved randomize() in a more proper place --- GPy/core/model.py | 3 ++- GPy/util/misc.py | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 6b7d32c6..b6cedbaf 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -185,7 +185,7 @@ class model(parameterised): :verbose: whether to show informations about the current restart :parallel: whether to run each restart as a separate process. It relies on the multiprocessing module. :num_processes: number of workers in the multiprocessing pool - + ..Note: If num_processes is None, the number of workes in the multiprocessing pool is automatically set to the number of processors on the current machine. @@ -198,6 +198,7 @@ class model(parameterised): jobs = [] pool = mp.Pool(processes=num_processes) for i in range(Nrestarts): + self.randomize() job = pool.apply_async(opt_wrapper, args = (self,), kwds = kwargs) jobs.append(job) diff --git a/GPy/util/misc.py b/GPy/util/misc.py index e3b91dce..e0f70703 100644 --- a/GPy/util/misc.py +++ b/GPy/util/misc.py @@ -9,7 +9,6 @@ def opt_wrapper(m, **kwargs): This function just wraps the optimization procedure of a GPy object so that optimize() pickleable (necessary for multiprocessing). 
""" - m.randomize() m.optimize(**kwargs) return m.optimization_runs[-1] From 734edfe9d9eb668b521e8446ff36826de68e5903 Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Mon, 25 Feb 2013 12:20:33 +0000 Subject: [PATCH 5/7] testing priors in the demo --- GPy/examples/warped_GP_demo.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py index a785770c..71ad663d 100644 --- a/GPy/examples/warped_GP_demo.py +++ b/GPy/examples/warped_GP_demo.py @@ -7,15 +7,14 @@ import scipy as sp import pdb, sys, pickle import matplotlib.pylab as plt import GPy -np.random.seed(3) +np.random.seed(1) N = 100 # sample inputs and outputs X = np.random.uniform(-np.pi,np.pi,(N,1)) Y = np.sin(X)+np.random.randn(N,1)*0.05 # Y += np.abs(Y.min()) + 0.5 -Z = np.exp(Y)# Y**(1/3.0) - +Z = np.exp(3.0*Y)#Y**(1/3.0) # rescaling targets? Zmax = Z.max() Zmin = Z.min() @@ -23,12 +22,21 @@ Z = (Z-Zmin)/(Zmax-Zmin) - 0.5 m = GPy.models.warpedGP(X, Z, warping_terms = 2) m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|noise|bias)') +# m.unconstrain('tanh_d') +# m.constrain_fixed('tanh_d', 1.0) + +# lognormal = GPy.priors.log_Gaussian(1.0, 2.0) # 1,2 +# gaussian = GPy.priors.Gaussian(0, 10) # 0, 10 +# m.set_prior('tanh_c', gaussian) +# m.set_prior('(tanh_b|tanh_a)', lognormal) + m.randomize() plt.figure() plt.xlabel('predicted f(Z)') plt.ylabel('actual f(Z)') plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'before training') -m.optimize(messages = True) +# m.optimize(messages = True) +m.optimize_restarts(4, parallel = True) plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'after training') plt.legend(loc = 0) m.plot_warping() From 794f918eb0dccb7ac514b17e825a53d0c87e538b Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Wed, 13 Mar 2013 09:27:36 +0000 Subject: [PATCH 6/7] changed version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 40c89ccb..c30988c5 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ from numpy.distutils.core import Extension, setup #from sphinx.setup_command import BuildDoc # Version number -version = '0.1.3' +version = '0.2' def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() From c818268a9e56319bda7d357a9ed95e307fa6a75c Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Fri, 15 Mar 2013 17:12:43 +0000 Subject: [PATCH 7/7] changed prediction code --- GPy/examples/warped_GP_demo.py | 39 ++++++++++++------------ GPy/models/warped_GP.py | 55 +++++++++++++++++----------------- GPy/util/warping_functions.py | 2 +- 3 files changed, 48 insertions(+), 48 deletions(-) diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py index 71ad663d..8250caca 100644 --- a/GPy/examples/warped_GP_demo.py +++ b/GPy/examples/warped_GP_demo.py @@ -7,44 +7,43 @@ import scipy as sp import pdb, sys, pickle import matplotlib.pylab as plt import GPy -np.random.seed(1) +np.random.seed(2) -N = 100 +N = 120 # sample inputs and outputs X = np.random.uniform(-np.pi,np.pi,(N,1)) Y = np.sin(X)+np.random.randn(N,1)*0.05 -# Y += np.abs(Y.min()) + 0.5 -Z = np.exp(3.0*Y)#Y**(1/3.0) -# rescaling targets? 
+Y += np.abs(Y.min()) + 0.5 +Z = np.exp(Y)#Y**(1/3.0) Zmax = Z.max() Zmin = Z.min() Z = (Z-Zmin)/(Zmax-Zmin) - 0.5 +train = range(X.shape[0])[:100] +test = range(X.shape[0])[100:] -m = GPy.models.warpedGP(X, Z, warping_terms = 2) -m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|noise|bias)') -# m.unconstrain('tanh_d') -# m.constrain_fixed('tanh_d', 1.0) - -# lognormal = GPy.priors.log_Gaussian(1.0, 2.0) # 1,2 -# gaussian = GPy.priors.Gaussian(0, 10) # 0, 10 -# m.set_prior('tanh_c', gaussian) -# m.set_prior('(tanh_b|tanh_a)', lognormal) - +kernel = GPy.kern.rbf(1) + GPy.kern.bias(1) +m = GPy.models.warpedGP(X[train], Z[train], kernel=kernel, warping_terms = 2) +m.constrain_positive('(tanh_a|tanh_b|rbf|noise|bias)') +m.constrain_fixed('tanh_d', 1.0) m.randomize() plt.figure() plt.xlabel('predicted f(Z)') plt.ylabel('actual f(Z)') -plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'before training') -# m.optimize(messages = True) -m.optimize_restarts(4, parallel = True) -plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'after training') +plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'before training') +m.optimize(messages = True) +# m.optimize_restarts(4, parallel = True, messages = True) +plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'after training') plt.legend(loc = 0) m.plot_warping() plt.figure() plt.title('warped GP fit') m.plot() +m.optimize(messages=1) +plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Y[test].flatten(), 'x'); plt.title('prediction in unwarped space') +m.predict_in_warped_space = True +plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Z[test].flatten(), 'x'); plt.title('prediction in warped space') -m1 = GPy.models.GP_regression(X, Z) +m1 = GPy.models.GP_regression(X[train], Z[train]) m1.constrain_positive('(rbf|noise|bias)') m1.randomize() m1.optimize(messages = True) diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py index d08288f9..052f8d8e 100644 --- a/GPy/models/warped_GP.py +++ b/GPy/models/warped_GP.py @@ -9,44 +9,52 @@ from ..util.linalg import pdinv from ..util.plot import gpplot from ..util.warping_functions import * from GP_regression import GP_regression +from GP import GP +from .. import likelihoods +from .. 
import kern +class warpedGP(GP): + def __init__(self, X, Y, kernel=None, warping_function = None, warping_terms = 3, normalize_X=False, normalize_Y=False, Xslices=None): -class warpedGP(GP_regression): - def __init__(self, X, Y, warping_function = None, warping_terms = 3, **kwargs): + if kernel is None: + kernel = kern.rbf(X.shape[1]) if warping_function == None: self.warping_function = TanhWarpingFunction_d(warping_terms) self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1) - self.Z = Y.copy() - self.N, self.D = Y.shape - GP_regression.__init__(self, X, self.transform_data(), **kwargs) + self.has_uncertain_inputs = False + self.Y_untransformed = Y.copy() + self.predict_in_warped_space = False + likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y) + + GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X, Xslices=Xslices) def _set_params(self, x): self.warping_params = x[:self.warping_function.num_parameters] Y = self.transform_data() self.likelihood.set_data(Y) - GP_regression._set_params(self, x[self.warping_function.num_parameters:].copy()) + GP._set_params(self, x[self.warping_function.num_parameters:].copy()) def _get_params(self): - return np.hstack((self.warping_params.flatten().copy(), GP_regression._get_params(self).copy())) + return np.hstack((self.warping_params.flatten().copy(), GP._get_params(self).copy())) def _get_param_names(self): warping_names = self.warping_function._get_param_names() - param_names = GP_regression._get_param_names(self) + param_names = GP._get_param_names(self) return warping_names + param_names def transform_data(self): - Y = self.warping_function.f(self.Z.copy(), self.warping_params).copy() + Y = self.warping_function.f(self.Y_untransformed.copy(), self.warping_params).copy() return Y def log_likelihood(self): - ll = GP_regression.log_likelihood(self) - jacobian = self.warping_function.fgrad_y(self.Z, self.warping_params) + ll = GP.log_likelihood(self) + jacobian = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params) return ll + np.log(jacobian).sum() def _log_likelihood_gradients(self): - ll_grads = GP_regression._log_likelihood_gradients(self) + ll_grads = GP._log_likelihood_gradients(self) alpha = np.dot(self.Ki, self.likelihood.Y.flatten()) warping_grads = self.warping_function_gradients(alpha) @@ -54,29 +62,22 @@ class warpedGP(GP_regression): return np.hstack((warping_grads.flatten(), ll_grads.flatten())) def warping_function_gradients(self, Kiy): - grad_y = self.warping_function.fgrad_y(self.Z, self.warping_params) - grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Z, self.warping_params, + grad_y = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params) + grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, self.warping_params, return_covar_chain = True) djac_dpsi = ((1.0/grad_y[:,:, None, None])*grad_y_psi).sum(axis=0).sum(axis=0) dquad_dpsi = (Kiy[:,None,None,None] * grad_psi).sum(axis=0).sum(axis=0) - return -dquad_dpsi + djac_dpsi def plot_warping(self): - self.warping_function.plot(self.warping_params, self.Z.min(), self.Z.max()) + self.warping_function.plot(self.warping_params, self.Y_untransformed.min(), self.Y_untransformed.max()) - def predict(self, X, in_unwarped_space = False, **kwargs): - mu, var, _025pm, _975pm = GP_regression.predict(self, X, **kwargs) + def _raw_predict(self, *args, **kwargs): + mu, var = GP._raw_predict(self, *args, **kwargs) - # The plot() function calls _set_params() 
before calling predict() - # this is causing the observations to be plotted in the transformed - # space (where Y lives), making the plot looks very wrong - # if the predictions are made in the untransformed space - # (where Z lives). To fix this I included the option below. It's - # just a quick fix until I figure out something smarter. - if in_unwarped_space: + if self.predict_in_warped_space: mu = self.warping_function.f_inv(mu, self.warping_params) - var = self.warping_function.f_inv(var[:, None], self.warping_params) + var = self.warping_function.f_inv(var, self.warping_params) - return mu, var, _025pm, _975pm + return mu, var diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 89c88dd2..3ea6dcc6 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -185,7 +185,7 @@ class TanhWarpingFunction_d(WarpingFunction): return z - def f_inv(self, y, psi, iterations = 10): + def f_inv(self, y, psi, iterations = 30): """ calculate the numerical inverse of f
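
For readers following this series, here is a minimal, self-contained sketch of
the transformation the patches above implement. Patch 1 extends the tanh
warping with a linear term, f(y) = d*y + sum_i a_i*tanh(b_i*(y + c_i)), packs
the parameters as psi = [a_1, b_1, c_1, ..., a_n, b_n, c_n, d] (the layout used
by TanhWarpingFunction_d), inverts f numerically with Newton-Raphson (f_inv),
and adds the log-Jacobian term sum(log f'(y)) to the GP log-likelihood. The
function names below are illustrative stand-ins, not GPy's API; only the math
is taken from the patches.

import numpy as np

def warp(y, psi, n_terms):
    # f(y) = d*y + sum_i a_i * tanh(b_i * (y + c_i))
    abc = psi[:3 * n_terms].reshape(n_terms, 3)
    z = psi[-1] * y
    for a, b, c in abc:
        z = z + a * np.tanh(b * (y + c))
    return z

def warp_grad_y(y, psi, n_terms):
    # f'(y) = d + sum_i a_i * b_i * (1 - tanh(b_i * (y + c_i))**2)
    abc = psi[:3 * n_terms].reshape(n_terms, 3)
    g = np.full_like(y, psi[-1])
    for a, b, c in abc:
        g = g + a * b * (1.0 - np.tanh(b * (y + c)) ** 2)
    return g

def warp_inv(z, psi, n_terms, iterations=30):
    # Newton-Raphson inverse, mirroring f_inv: start at ones and iterate
    y = np.ones_like(z)
    for _ in range(iterations):
        y = y - (warp(y, psi, n_terms) - z) / warp_grad_y(y, psi, n_terms)
    return y

def log_jacobian(y, psi, n_terms):
    # the np.log(jacobian).sum() correction added in warpedGP.log_likelihood
    return np.sum(np.log(warp_grad_y(y, psi, n_terms)))

rng = np.random.RandomState(0)
n_terms = 2
psi = np.abs(rng.randn(3 * n_terms + 1))  # positive a, b, d keep f monotonic
y = rng.randn(5, 1)
z = warp(y, psi, n_terms)
print(np.allclose(warp_inv(z, psi, n_terms), y))  # True to NR tolerance

Keeping each a_i, b_i and d positive (the demo constrains them positive, and
patch 7 pins tanh_d at 1.0) makes f strictly increasing, which is what keeps
both the Newton inverse and the log f'(y) likelihood term well defined;
raising f_inv's iteration count from 10 to 30 in patch 7 simply tightens that
root-find.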