From ea9763865d4fa81c01932e2f762e3b405c39d2b5 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Thu, 10 Jan 2013 11:58:26 +0000
Subject: [PATCH 01/18] added a term to warping function

---
 GPy/examples/warped_GP_demo.py |   2 +-
 GPy/models/warped_GP.py        |  14 ++--
 GPy/util/warping_functions.py  | 115 +++++++++++++++++++++++++++++++++
 3 files changed, 124 insertions(+), 7 deletions(-)

diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py
index 3b75694a..1fd8b105 100644
--- a/GPy/examples/warped_GP_demo.py
+++ b/GPy/examples/warped_GP_demo.py
@@ -22,7 +22,7 @@
 Zmin = Z.min()
 Z = (Z-Zmin)/(Zmax-Zmin) - 0.5
 m = GPy.models.warpedGP(X, Z, warping_terms = 2)
-m.constrain_positive('(tanh_a|tanh_b|rbf|white|bias)')
+m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|white|bias)')
 m.randomize()
 plt.figure()
 plt.xlabel('predicted f(Z)')
diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py
index bf5af21f..00eb94d8 100644
--- a/GPy/models/warped_GP.py
+++ b/GPy/models/warped_GP.py
@@ -20,10 +20,10 @@ class warpedGP(GP_regression):
     def __init__(self, X, Y, warping_function = None, warping_terms = 3, **kwargs):
 
         if warping_function == None:
-            self.warping_function = TanhWarpingFunction(warping_terms)
-            # self.warping_params = np.random.randn(self.warping_function.n_terms, 3)
-            self.warping_params = np.ones((self.warping_function.n_terms, 3))*0.0 # TODO better init
-            self.warp_params_shape = (self.warping_function.n_terms, 3) # todo get this from the subclass
+            self.warping_function = TanhWarpingFunction_d(warping_terms)
+            self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1)
+            # self.warping_params = np.ones((self.warping_function.n_terms*3 + 1,)) # TODO better init
+            # self.warp_params_shape = (self.warping_function.n_terms, 4) # todo get this from the subclass
 
         self.Z = Y.copy()
         self.N, self.D = Y.shape
@@ -31,7 +31,7 @@ class warpedGP(GP_regression):
         GP_regression.__init__(self, X, self.Y, **kwargs)
 
     def set_param(self, x):
-        self.warping_params = x[:self.warping_function.num_parameters].reshape(self.warp_params_shape).copy()
+        self.warping_params = x[:self.warping_function.num_parameters]
         self.transform_data()
         GP_regression.set_param(self, x[self.warping_function.num_parameters:].copy())
 
@@ -63,16 +63,18 @@ class warpedGP(GP_regression):
         ll_grads = GP_regression.log_likelihood_gradients(self)
         alpha = np.dot(self.Ki, self.Y.flatten())
         warping_grads = self.warping_function_gradients(alpha)
+
+        warping_grads = np.append(warping_grads[:,:-1].flatten(), warping_grads[0,-1])
 
         return np.hstack((warping_grads.flatten(), ll_grads.flatten()))
 
     def warping_function_gradients(self, Kiy):
         grad_y = self.warping_function.fgrad_y(self.Z, self.warping_params)
         grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Z, self.warping_params,
                                                                  return_covar_chain = True)
-
         djac_dpsi = ((1.0/grad_y[:,:, None, None])*grad_y_psi).sum(axis=0).sum(axis=0)
         dquad_dpsi = (Kiy[:,None,None,None] * grad_psi).sum(axis=0).sum(axis=0)
+
         return -dquad_dpsi + djac_dpsi
 
     def plot_warping(self):
diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py
index 5a6dc6b3..79d8d9d8 100644
--- a/GPy/util/warping_functions.py
+++ b/GPy/util/warping_functions.py
@@ -155,3 +155,118 @@ class TanhWarpingFunction(WarpingFunction):
         variables = ['a', 'b', 'c']
         names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
         return names
+
+
+class TanhWarpingFunction_d(WarpingFunction):
+
+    def __init__(self,n_terms=3):
+        """n_terms specifies the number of tanh terms to be used"""
+        self.n_terms = n_terms
+        self.num_parameters = 3 * self.n_terms + 1
+
+    def f(self,y,psi):
+        """transform y with f using parameter vector psi
+        psi = [a_1, b_1, c_1, ..., a_n, b_n, c_n, d]
+        f = d*y + \sum_{terms} a * tanh(b*(y+c))
+        """
+
+        #1. check that number of params is consistent
+        # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'
+        # assert psi.shape[1] == 4, 'inconsistent parameter dimensions'
+        mpsi = psi.copy()
+        d = psi[-1]
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+
+        #2. transform data
+        z = d*y.copy()
+        for i in range(len(mpsi)):
+            a,b,c = mpsi[i]
+            z += a*np.tanh(b*(y+c))
+        return z
+
+
+    def f_inv(self, y, psi, iterations = 10):
+        """
+        calculate the numerical inverse of f
+
+        == input ==
+        iterations: number of Newton-Raphson iterations
+
+        """
+
+        y = y.copy()
+        z = np.ones_like(y)
+
+        for i in range(iterations):
+            z -= (self.f(z, psi) - y)/self.fgrad_y(z,psi)
+
+        return z
+
+
+    def fgrad_y(self, y, psi, return_precalc = False):
+        """
+        gradient of f w.r.t. y ([N x 1])
+        returns: Nx1 vector of derivatives, unless return_precalc is True,
+        then it also returns the precomputed stuff
+        """
+
+
+        mpsi = psi.copy()
+        d = psi[-1]
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+
+        # vectorized version
+
+        S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T
+        R = np.tanh(S)
+        D = 1-R**2
+
+        GRAD = (d + (mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T
+
+        if return_precalc:
+            return GRAD, S, R, D
+
+
+        return GRAD
+
+
+    def fgrad_y_psi(self, y, psi, return_covar_chain = False):
+        """
+        gradient of f w.r.t. y and psi
+
+        returns: NxIx4 tensor of partial derivatives
+
+        """
+
+        mpsi = psi.copy()
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+
+        w, s, r, d = self.fgrad_y(y, psi, return_precalc = True)
+
+        gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
+        for i in range(len(mpsi)):
+            a,b,c = mpsi[i]
+            gradients[:,:,i,0] = (b*(1.0/np.cosh(s[i]))**2).T
+            gradients[:,:,i,1] = a*(d[i] - 2.0*s[i]*r[i]*(1.0/np.cosh(s[i]))**2).T
+            gradients[:,:,i,2] = (-2.0*a*(b**2)*r[i]*((1.0/np.cosh(s[i]))**2)).T
+        gradients[:,:,0,3] = 1.0
+
+        if return_covar_chain:
+            covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
+
+            for i in range(len(mpsi)):
+                a,b,c = mpsi[i]
+                covar_grad_chain[:, :, i, 0] = (r[i]).T
+                covar_grad_chain[:, :, i, 1] = (a*(y + c) * ((1.0/np.cosh(s[i]))**2).T)
+                covar_grad_chain[:, :, i, 2] = a*b*((1.0/np.cosh(s[i]))**2).T
+            covar_grad_chain[:, :, 0, 3] = y
+
+            return gradients, covar_grad_chain
+
+        return gradients
+
+    def get_param_names(self):
+        variables = ['a', 'b', 'c', 'd']
+        names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
+        names.append('warp_tanh_d')
+        return names

From b01dd3d758d778e91e002bc04900a5a5a39bd702 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Tue, 22 Jan 2013 16:24:08 +0000
Subject: [PATCH 02/18] minor

---
 GPy/kern/linear.py            | 2 +-
 GPy/models/warped_GP.py       | 2 +-
 GPy/util/warping_functions.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/GPy/kern/linear.py b/GPy/kern/linear.py
index 7246244e..f02cfb90 100644
--- a/GPy/kern/linear.py
+++ b/GPy/kern/linear.py
@@ -65,7 +65,7 @@ class linear(kernpart):
             self._X2cache = X2
             self._dot_product = np.dot(X,X2.T)
         else:
-            # print "Cache hit!"
+            #print "Cache hit!"
             pass # TODO: insert debug message here (logging framework)
diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py
index 00eb94d8..695fd896 100644
--- a/GPy/models/warped_GP.py
+++ b/GPy/models/warped_GP.py
@@ -91,6 +91,6 @@ class warpedGP(GP_regression):
         # just a quick fix until I figure out something smarter.
         if in_unwarped_space:
             mu = self.warping_function.f_inv(mu, self.warping_params)
-            var = self.warping_function.f_inv(var, self.warping_params)
+            var = self.warping_function.f_inv(var[:, None], self.warping_params)
 
         return mu, var
diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py
index 79d8d9d8..af59dd49 100644
--- a/GPy/util/warping_functions.py
+++ b/GPy/util/warping_functions.py
@@ -81,7 +81,7 @@ class TanhWarpingFunction(WarpingFunction):
         iterations: number of Newton-Raphson iterations
 
         """
-
+
         y = y.copy()
         z = np.ones_like(y)
 

From d82763be3935cca62d507fdab06cf1ac866e3792 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nicol=C3=B2=20Fusi?=
Date: Tue, 22 Jan 2013 17:59:02 +0000
Subject: [PATCH 03/18] fixed _get_param_names

---
 GPy/util/warping_functions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py
index a7c4c282..89c88dd2 100644
--- a/GPy/util/warping_functions.py
+++ b/GPy/util/warping_functions.py
@@ -265,7 +265,7 @@ class TanhWarpingFunction_d(WarpingFunction):
 
         return gradients
 
-    def get_param_names(self):
+    def _get_param_names(self):
         variables = ['a', 'b', 'c', 'd']
         names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
         names.append('warp_tanh_d')

From f661b4b64ee24808dea688f0628fa68b62970834 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Mon, 25 Feb 2013 12:20:20 +0000
Subject: [PATCH 04/18] moved randomize() to a more appropriate place

---
 GPy/core/model.py | 3 ++-
 GPy/util/misc.py  | 1 -
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/core/model.py b/GPy/core/model.py
index 6b7d32c6..b6cedbaf 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -185,7 +185,7 @@ class model(parameterised):
         :verbose: whether to show information about the current restart
         :parallel: whether to run each restart as a separate process. It relies on the multiprocessing module.
         :num_processes: number of workers in the multiprocessing pool
-
+
         ..Note: If num_processes is None, the number of workers in the multiprocessing pool
         is automatically set to the number of processors on the current machine.
 
@@ -198,6 +198,7 @@
         jobs = []
         pool = mp.Pool(processes=num_processes)
         for i in range(Nrestarts):
+            self.randomize()
             job = pool.apply_async(opt_wrapper, args = (self,), kwds = kwargs)
             jobs.append(job)
 
diff --git a/GPy/util/misc.py b/GPy/util/misc.py
index e3b91dce..e0f70703 100644
--- a/GPy/util/misc.py
+++ b/GPy/util/misc.py
@@ -9,7 +9,6 @@ def opt_wrapper(m, **kwargs):
     This function just wraps the optimization procedure of a GPy object
     so that optimize() is pickleable (necessary for multiprocessing).
""" - m.randomize() m.optimize(**kwargs) return m.optimization_runs[-1] From 734edfe9d9eb668b521e8446ff36826de68e5903 Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Mon, 25 Feb 2013 12:20:33 +0000 Subject: [PATCH 05/18] testing priors in the demo --- GPy/examples/warped_GP_demo.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py index a785770c..71ad663d 100644 --- a/GPy/examples/warped_GP_demo.py +++ b/GPy/examples/warped_GP_demo.py @@ -7,15 +7,14 @@ import scipy as sp import pdb, sys, pickle import matplotlib.pylab as plt import GPy -np.random.seed(3) +np.random.seed(1) N = 100 # sample inputs and outputs X = np.random.uniform(-np.pi,np.pi,(N,1)) Y = np.sin(X)+np.random.randn(N,1)*0.05 # Y += np.abs(Y.min()) + 0.5 -Z = np.exp(Y)# Y**(1/3.0) - +Z = np.exp(3.0*Y)#Y**(1/3.0) # rescaling targets? Zmax = Z.max() Zmin = Z.min() @@ -23,12 +22,21 @@ Z = (Z-Zmin)/(Zmax-Zmin) - 0.5 m = GPy.models.warpedGP(X, Z, warping_terms = 2) m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|noise|bias)') +# m.unconstrain('tanh_d') +# m.constrain_fixed('tanh_d', 1.0) + +# lognormal = GPy.priors.log_Gaussian(1.0, 2.0) # 1,2 +# gaussian = GPy.priors.Gaussian(0, 10) # 0, 10 +# m.set_prior('tanh_c', gaussian) +# m.set_prior('(tanh_b|tanh_a)', lognormal) + m.randomize() plt.figure() plt.xlabel('predicted f(Z)') plt.ylabel('actual f(Z)') plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'before training') -m.optimize(messages = True) +# m.optimize(messages = True) +m.optimize_restarts(4, parallel = True) plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'after training') plt.legend(loc = 0) m.plot_warping() From 794f918eb0dccb7ac514b17e825a53d0c87e538b Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Wed, 13 Mar 2013 09:27:36 +0000 Subject: [PATCH 06/18] changed version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 40c89ccb..c30988c5 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ from numpy.distutils.core import Extension, setup #from sphinx.setup_command import BuildDoc # Version number -version = '0.1.3' +version = '0.2' def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() From c818268a9e56319bda7d357a9ed95e307fa6a75c Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Fri, 15 Mar 2013 17:12:43 +0000 Subject: [PATCH 07/18] changed prediction code --- GPy/examples/warped_GP_demo.py | 39 ++++++++++++------------ GPy/models/warped_GP.py | 55 +++++++++++++++++----------------- GPy/util/warping_functions.py | 2 +- 3 files changed, 48 insertions(+), 48 deletions(-) diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py index 71ad663d..8250caca 100644 --- a/GPy/examples/warped_GP_demo.py +++ b/GPy/examples/warped_GP_demo.py @@ -7,44 +7,43 @@ import scipy as sp import pdb, sys, pickle import matplotlib.pylab as plt import GPy -np.random.seed(1) +np.random.seed(2) -N = 100 +N = 120 # sample inputs and outputs X = np.random.uniform(-np.pi,np.pi,(N,1)) Y = np.sin(X)+np.random.randn(N,1)*0.05 -# Y += np.abs(Y.min()) + 0.5 -Z = np.exp(3.0*Y)#Y**(1/3.0) -# rescaling targets? 
+Y += np.abs(Y.min()) + 0.5 +Z = np.exp(Y)#Y**(1/3.0) Zmax = Z.max() Zmin = Z.min() Z = (Z-Zmin)/(Zmax-Zmin) - 0.5 +train = range(X.shape[0])[:100] +test = range(X.shape[0])[100:] -m = GPy.models.warpedGP(X, Z, warping_terms = 2) -m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|noise|bias)') -# m.unconstrain('tanh_d') -# m.constrain_fixed('tanh_d', 1.0) - -# lognormal = GPy.priors.log_Gaussian(1.0, 2.0) # 1,2 -# gaussian = GPy.priors.Gaussian(0, 10) # 0, 10 -# m.set_prior('tanh_c', gaussian) -# m.set_prior('(tanh_b|tanh_a)', lognormal) - +kernel = GPy.kern.rbf(1) + GPy.kern.bias(1) +m = GPy.models.warpedGP(X[train], Z[train], kernel=kernel, warping_terms = 2) +m.constrain_positive('(tanh_a|tanh_b|rbf|noise|bias)') +m.constrain_fixed('tanh_d', 1.0) m.randomize() plt.figure() plt.xlabel('predicted f(Z)') plt.ylabel('actual f(Z)') -plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'before training') -# m.optimize(messages = True) -m.optimize_restarts(4, parallel = True) -plt.plot(m.likelihood.Y, Y, 'o', alpha = 0.5, label = 'after training') +plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'before training') +m.optimize(messages = True) +# m.optimize_restarts(4, parallel = True, messages = True) +plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'after training') plt.legend(loc = 0) m.plot_warping() plt.figure() plt.title('warped GP fit') m.plot() +m.optimize(messages=1) +plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Y[test].flatten(), 'x'); plt.title('prediction in unwarped space') +m.predict_in_warped_space = True +plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Z[test].flatten(), 'x'); plt.title('prediction in warped space') -m1 = GPy.models.GP_regression(X, Z) +m1 = GPy.models.GP_regression(X[train], Z[train]) m1.constrain_positive('(rbf|noise|bias)') m1.randomize() m1.optimize(messages = True) diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py index d08288f9..052f8d8e 100644 --- a/GPy/models/warped_GP.py +++ b/GPy/models/warped_GP.py @@ -9,44 +9,52 @@ from ..util.linalg import pdinv from ..util.plot import gpplot from ..util.warping_functions import * from GP_regression import GP_regression +from GP import GP +from .. import likelihoods +from .. 
import kern +class warpedGP(GP): + def __init__(self, X, Y, kernel=None, warping_function = None, warping_terms = 3, normalize_X=False, normalize_Y=False, Xslices=None): -class warpedGP(GP_regression): - def __init__(self, X, Y, warping_function = None, warping_terms = 3, **kwargs): + if kernel is None: + kernel = kern.rbf(X.shape[1]) if warping_function == None: self.warping_function = TanhWarpingFunction_d(warping_terms) self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1) - self.Z = Y.copy() - self.N, self.D = Y.shape - GP_regression.__init__(self, X, self.transform_data(), **kwargs) + self.has_uncertain_inputs = False + self.Y_untransformed = Y.copy() + self.predict_in_warped_space = False + likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y) + + GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X, Xslices=Xslices) def _set_params(self, x): self.warping_params = x[:self.warping_function.num_parameters] Y = self.transform_data() self.likelihood.set_data(Y) - GP_regression._set_params(self, x[self.warping_function.num_parameters:].copy()) + GP._set_params(self, x[self.warping_function.num_parameters:].copy()) def _get_params(self): - return np.hstack((self.warping_params.flatten().copy(), GP_regression._get_params(self).copy())) + return np.hstack((self.warping_params.flatten().copy(), GP._get_params(self).copy())) def _get_param_names(self): warping_names = self.warping_function._get_param_names() - param_names = GP_regression._get_param_names(self) + param_names = GP._get_param_names(self) return warping_names + param_names def transform_data(self): - Y = self.warping_function.f(self.Z.copy(), self.warping_params).copy() + Y = self.warping_function.f(self.Y_untransformed.copy(), self.warping_params).copy() return Y def log_likelihood(self): - ll = GP_regression.log_likelihood(self) - jacobian = self.warping_function.fgrad_y(self.Z, self.warping_params) + ll = GP.log_likelihood(self) + jacobian = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params) return ll + np.log(jacobian).sum() def _log_likelihood_gradients(self): - ll_grads = GP_regression._log_likelihood_gradients(self) + ll_grads = GP._log_likelihood_gradients(self) alpha = np.dot(self.Ki, self.likelihood.Y.flatten()) warping_grads = self.warping_function_gradients(alpha) @@ -54,29 +62,22 @@ class warpedGP(GP_regression): return np.hstack((warping_grads.flatten(), ll_grads.flatten())) def warping_function_gradients(self, Kiy): - grad_y = self.warping_function.fgrad_y(self.Z, self.warping_params) - grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Z, self.warping_params, + grad_y = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params) + grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, self.warping_params, return_covar_chain = True) djac_dpsi = ((1.0/grad_y[:,:, None, None])*grad_y_psi).sum(axis=0).sum(axis=0) dquad_dpsi = (Kiy[:,None,None,None] * grad_psi).sum(axis=0).sum(axis=0) - return -dquad_dpsi + djac_dpsi def plot_warping(self): - self.warping_function.plot(self.warping_params, self.Z.min(), self.Z.max()) + self.warping_function.plot(self.warping_params, self.Y_untransformed.min(), self.Y_untransformed.max()) - def predict(self, X, in_unwarped_space = False, **kwargs): - mu, var, _025pm, _975pm = GP_regression.predict(self, X, **kwargs) + def _raw_predict(self, *args, **kwargs): + mu, var = GP._raw_predict(self, *args, **kwargs) - # The plot() function calls _set_params() 
before calling predict() - # this is causing the observations to be plotted in the transformed - # space (where Y lives), making the plot looks very wrong - # if the predictions are made in the untransformed space - # (where Z lives). To fix this I included the option below. It's - # just a quick fix until I figure out something smarter. - if in_unwarped_space: + if self.predict_in_warped_space: mu = self.warping_function.f_inv(mu, self.warping_params) - var = self.warping_function.f_inv(var[:, None], self.warping_params) + var = self.warping_function.f_inv(var, self.warping_params) - return mu, var, _025pm, _975pm + return mu, var diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 89c88dd2..3ea6dcc6 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -185,7 +185,7 @@ class TanhWarpingFunction_d(WarpingFunction): return z - def f_inv(self, y, psi, iterations = 10): + def f_inv(self, y, psi, iterations = 30): """ calculate the numerical inverse of f From c44493077a1ac30c9c0d0420a4d2ee72cdebc6a4 Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Mon, 18 Mar 2013 14:31:26 +0000 Subject: [PATCH 08/18] made parallel optimize_restart responsive to ctrl+c --- GPy/core/model.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 703e615d..7a8a3429 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -188,19 +188,23 @@ class model(parameterised): """ - initial_parameters = self._get_params_transformed() if parallel: - jobs = [] - pool = mp.Pool(processes=num_processes) - for i in range(Nrestarts): - self.randomize() - job = pool.apply_async(opt_wrapper, args = (self,), kwds = kwargs) - jobs.append(job) + try: + jobs = [] + pool = mp.Pool(processes=num_processes) + for i in range(Nrestarts): + self.randomize() + job = pool.apply_async(opt_wrapper, args = (self,), kwds = kwargs) + jobs.append(job) - pool.close() # signal that no more data coming in - pool.join() # wait for all the tasks to complete + pool.close() # signal that no more data coming in + pool.join() # wait for all the tasks to complete + except KeyboardInterrupt: + print "Ctrl+c received, terminating and joining pool." 
+ pool.terminate() + pool.join() for i in range(Nrestarts): try: From 8ab1cfaf65a530721951995a6ca82a437cb4075c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Fusi?= Date: Thu, 21 Mar 2013 11:05:17 +0000 Subject: [PATCH 09/18] changes in GPLVM plotting --- GPy/core/model.py | 2 +- GPy/inference/SGD.py | 82 +++++++++++++++++++++++++++++++++++--------- GPy/models/GPLVM.py | 25 +++++++++----- 3 files changed, 84 insertions(+), 25 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 7a8a3429..76f6dea7 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -256,7 +256,7 @@ class model(parameterised): self._set_params_transformed(x) LL_gradients = self._transform_gradients(self._log_likelihood_gradients()) prior_gradients = self._transform_gradients(self._log_prior_gradients()) - return -LL_gradients - prior_gradients + return - LL_gradients - prior_gradients def objective_and_gradients(self, x): obj_f = self.objective_function(x) diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py index a1eb82d1..4fb12b28 100644 --- a/GPy/inference/SGD.py +++ b/GPy/inference/SGD.py @@ -3,6 +3,7 @@ import scipy as sp import scipy.sparse from optimization import Optimizer from scipy import linalg, optimize +import pylab as plt import copy import sys @@ -31,6 +32,16 @@ class opt_SGD(Optimizer): self.batch_size = batch_size self.self_paced = self_paced self.center = center + self.param_traces = [('noise',[])] + if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1: + self.param_traces.append(('bias',[])) + if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1: + self.param_traces.append(('linear',[])) + if len([p for p in self.model.kern.parts if p.name == 'rbf']) == 1: + self.param_traces.append(('rbf_var',[])) + + self.param_traces = dict(self.param_traces) + self.fopt_trace = [] num_params = len(self.model._get_params()) if isinstance(self.learning_rate, float): @@ -48,6 +59,18 @@ class opt_SGD(Optimizer): status += "Time elapsed: \t\t\t %s\n" % self.time return status + def plot_traces(self): + plt.figure() + plt.subplot(211) + plt.title('Parameters') + for k in self.param_traces.keys(): + plt.plot(self.param_traces[k], label=k) + plt.legend(loc=0) + plt.subplot(212) + plt.title('Objective function') + plt.plot(self.fopt_trace) + + def non_null_samples(self, data): return (np.isnan(data).sum(axis=1) == 0) @@ -128,25 +151,37 @@ class opt_SGD(Optimizer): def step_with_missing_data(self, f_fp, X, step, shapes, sparse_matrix): N, Q = X.shape + if not sparse_matrix: + Y = self.model.likelihood.Y samples = self.non_null_samples(self.model.likelihood.Y) self.model.N = samples.sum() - self.model.likelihood.Y = self.model.likelihood.Y[samples] + + if self.center: + self.model.likelihood._mean = Y[samples].mean() + self.model.likelihood._std = Y[samples].std() + + self.model.likelihood.set_data(Y[samples]) else: samples = self.model.likelihood.Y.nonzero()[0] self.model.N = len(samples) - self.model.likelihood.Y = np.asarray(self.model.likelihood.Y[samples].todense(), dtype = np.float64) + Y = np.asarray(self.model.likelihood.Y[samples].todense(), dtype = np.float64) + if self.center: + self.model.likelihood._mean = Y.mean() + self.model.likelihood._std = Y.std() - self.model.likelihood.N = self.model.N + self.model.likelihood.set_data(Y) + + # self.model.likelihood.N = self.model.N j = self.subset_parameter_vector(self.x_opt, samples, shapes) self.model.X = X[samples] if self.model.N == 0 or self.model.likelihood.Y.std() == 0.0: return 0, step, self.model.N - 
if self.center: - self.model.likelihood.Y -= self.model.likelihood.Y.mean() - self.model.likelihood.Y /= self.model.likelihood.Y.std() + # if self.center: + # self.model.likelihood.Y -= self.model.likelihood.Y.mean() + # self.model.likelihood.Y /= self.model.likelihood.Y.std() model_name = self.model.__class__.__name__ @@ -154,13 +189,13 @@ class opt_SGD(Optimizer): self.model.likelihood.trYYT = np.sum(np.square(self.model.likelihood.Y)) b, p = self.shift_constraints(j) - - momentum_term = self.momentum * step[j] - f, fp = f_fp(self.x_opt[j]) - step[j] = self.learning_rate[j] * fp - self.x_opt[j] -= step[j] + momentum_term + # momentum_term = self.momentum * step[j] + # step[j] = self.learning_rate[j] * fp + # self.x_opt[j] -= step[j] + momentum_term + step[j] = self.momentum * step[j] + self.learning_rate[j] * fp + self.x_opt[j] -= step[j] self.restore_constraints(b, p) return f, step, self.model.N @@ -177,10 +212,14 @@ class opt_SGD(Optimizer): missing_data = self.check_for_missing(self.model.likelihood.Y) self.model.likelihood.YYT = None + self.model.likelihood.trYYT = None + self.model.likelihood._mean = 0.0 + self.model.likelihood._std = 1.0 num_params = self.model._get_params() - step = np.zeros_like(num_params) + step = np.zeros_like(num_params) for it in range(self.iterations): + if it == 0 or self.self_paced is False: features = np.random.permutation(Y.shape[1]) else: @@ -195,17 +234,21 @@ class opt_SGD(Optimizer): for j in features: count += 1 self.model.D = len(j) - self.model.likelihood.Y = Y[:, j] + self.model.likelihood.D = len(j) + self.model.likelihood.set_data(Y[:, j]) if missing_data or sparse_matrix: shapes = self.get_param_shapes(N, Q) f, step, Nj = self.step_with_missing_data(f_fp, X, step, shapes, sparse_matrix) else: Nj = N - momentum_term = self.momentum * step # compute momentum using update(t-1) + # momentum_term = self.momentum * step # compute momentum using update(t-1) f, fp = f_fp(self.x_opt) - step = self.learning_rate * fp # compute update(t) - self.x_opt -= step + momentum_term + # step = self.learning_rate * fp # compute update(t) + # self.x_opt -= step + momentum_term + step = self.momentum * step + self.learning_rate * fp + self.x_opt -= step + if self.messages == 2: noise = np.exp(self.x_opt)[-1] @@ -216,12 +259,19 @@ class opt_SGD(Optimizer): NLL.append(f) + self.fopt_trace.append(f) + for k in self.param_traces.keys(): + self.param_traces[k].append(self.model.get(k)[0]) + + + # should really be a sum(), but earlier samples in the iteration will have a very crappy ll self.f_opt = np.mean(NLL) self.model.N = N self.model.X = X self.model.D = D self.model.likelihood.N = N + self.model.likelihood.D = D self.model.likelihood.Y = Y # self.model.Youter = np.dot(Y, Y.T) diff --git a/GPy/models/GPLVM.py b/GPy/models/GPLVM.py index 32594594..3f7ba113 100644 --- a/GPy/models/GPLVM.py +++ b/GPy/models/GPLVM.py @@ -67,7 +67,13 @@ class GPLVM(GP): """ util.plot.Tango.reset() - + + # this goes against the current standard in GPy, which currently is to not create + # figures in the plot() functions. 
I think the standard should be changed in order + # to accomodate cases like this + fig = pb.figure() + ax = fig.add_subplot(111) + if labels is None: labels = np.ones(self.N) if which_indices is None: @@ -86,15 +92,17 @@ class GPLVM(GP): input_1, input_2 = np.argsort(k.lengthscale)[:2] elif k.name=='linear': input_1, input_2 = np.argsort(k.variances)[::-1][:2] + else: + input_1, input_2 = which_indices #first, plot the output variance as a function of the latent space Xtest, xx,yy,xmin,xmax = util.plot.x_frame2D(self.X[:,[input_1, input_2]],resolution=resolution) - Xtest_full = np.zeros((Xtest.shape[0], self.X.shape[1])) - Xtest_full[:, :2] = Xtest - mu, var, low, up = self.predict(Xtest_full) - var = var[:, :2] - pb.imshow(var.reshape(resolution,resolution).T[::-1,:],extent=[xmin[0],xmax[0],xmin[1],xmax[1]],cmap=pb.cm.binary,interpolation='bilinear') - + Xtest_full = np.zeros((Xtest.shape[0], self.X.shape[1])) + Xtest_full[:, :2] = Xtest + mu, var, low, up = self.predict(Xtest_full) + var = var[:, :1] # FIXME: this was a :2 + pb.imshow(var.reshape(resolution,resolution).T[::-1,:], + extent=[xmin[0], xmax[0], xmin[1], xmax[1]], cmap=pb.cm.binary,interpolation='bilinear') for i,ul in enumerate(np.unique(labels)): if type(ul) is np.string_: @@ -121,5 +129,6 @@ class GPLVM(GP): pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) - + pb.grid(b=False) # remove the grid if present, it doesn't look good + ax.set_aspect('auto') # set a nice aspect ratio return input_1, input_2 From eeb965d136245965c82ad5591d7845fa76657c5e Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Thu, 21 Mar 2013 15:28:46 +0000 Subject: [PATCH 10/18] added BGPLVM oil flow demo and changed default X_variance init --- GPy/examples/dimensionality_reduction.py | 28 ++++++++++++++++++++++++ GPy/inference/SGD.py | 2 +- GPy/models/Bayesian_GPLVM.py | 2 +- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index d7610acb..7095c55f 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -55,3 +55,31 @@ def GPLVM_oil_100(): print(m) m.plot_latent(labels=data['Y'].argmax(axis=1)) return m + + +def BGPLVM_oil(): + data = GPy.util.datasets.oil() + Y, X = data['Y'], data['X'] + X -= X.mean(axis=0) + # X /= X.std(axis=0) + + Q = 10 + M = 30 + + kernel = GPy.kern.rbf(Q, ARD = True) + GPy.kern.bias(Q) + GPy.kern.white(Q) + m = GPy.models.Bayesian_GPLVM(X, Q, kernel=kernel, M=M) + m.scale_factor = 10000.0 + m.constrain_positive('(white|noise|bias|X_variance|rbf_variance|rbf_length)') + from sklearn import cluster + km = cluster.KMeans(M, verbose=10) + Z = km.fit(m.X).cluster_centers_ + # Z = GPy.util.misc.kmm_init(m.X, M) + m.set('iip', Z) + # optimize + # m.ensure_default_constraints() + + import pdb; pdb.set_trace() + m.optimize('tnc', messages=1) + print m + m.plot_latent(labels=data['Y'].argmax(axis=1)) + return m diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py index 4fb12b28..b4228bff 100644 --- a/GPy/inference/SGD.py +++ b/GPy/inference/SGD.py @@ -242,8 +242,8 @@ class opt_SGD(Optimizer): f, step, Nj = self.step_with_missing_data(f_fp, X, step, shapes, sparse_matrix) else: Nj = N - # momentum_term = self.momentum * step # compute momentum using update(t-1) f, fp = f_fp(self.x_opt) + # momentum_term = self.momentum * step # compute momentum using update(t-1) # step = self.learning_rate * fp # compute update(t) # self.x_opt -= step + momentum_term step = self.momentum * step + 
self.learning_rate * fp diff --git a/GPy/models/Bayesian_GPLVM.py b/GPy/models/Bayesian_GPLVM.py index 8f9759c3..6b80329c 100644 --- a/GPy/models/Bayesian_GPLVM.py +++ b/GPy/models/Bayesian_GPLVM.py @@ -27,7 +27,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM): X = self.initialise_latent(init, Q, Y) if S is None: - S = np.ones_like(X) * 1e-2# + S = np.ones_like(X) * 0.5 + np.random.randn(*X.shape) * 1e-3 if Z is None: Z = np.random.permutation(X.copy())[:M] From 55ad96f38bfcf03492b360fd9a78577d558750fb Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Fri, 22 Mar 2013 15:58:02 +0000 Subject: [PATCH 11/18] made BGPLVM oil flow demo work, added ARD weights plot --- GPy/examples/dimensionality_reduction.py | 5 +-- GPy/inference/SGD.py | 45 +++++++++++------------- GPy/kern/kern.py | 21 +++++++++++ 3 files changed, 44 insertions(+), 27 deletions(-) diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index 7095c55f..97abfb3e 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -61,20 +61,21 @@ def BGPLVM_oil(): data = GPy.util.datasets.oil() Y, X = data['Y'], data['X'] X -= X.mean(axis=0) - # X /= X.std(axis=0) + X /= X.std(axis=0) Q = 10 M = 30 kernel = GPy.kern.rbf(Q, ARD = True) + GPy.kern.bias(Q) + GPy.kern.white(Q) m = GPy.models.Bayesian_GPLVM(X, Q, kernel=kernel, M=M) - m.scale_factor = 10000.0 + # m.scale_factor = 100.0 m.constrain_positive('(white|noise|bias|X_variance|rbf_variance|rbf_length)') from sklearn import cluster km = cluster.KMeans(M, verbose=10) Z = km.fit(m.X).cluster_centers_ # Z = GPy.util.misc.kmm_init(m.X, M) m.set('iip', Z) + m.set('bias', 1e-4) # optimize # m.ensure_default_constraints() diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py index b4228bff..dd402d5e 100644 --- a/GPy/inference/SGD.py +++ b/GPy/inference/SGD.py @@ -33,12 +33,12 @@ class opt_SGD(Optimizer): self.self_paced = self_paced self.center = center self.param_traces = [('noise',[])] - if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1: - self.param_traces.append(('bias',[])) - if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1: - self.param_traces.append(('linear',[])) - if len([p for p in self.model.kern.parts if p.name == 'rbf']) == 1: - self.param_traces.append(('rbf_var',[])) + # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1: + # self.param_traces.append(('bias',[])) + # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1: + # self.param_traces.append(('linear',[])) + # if len([p for p in self.model.kern.parts if p.name == 'rbf']) == 1: + # self.param_traces.append(('rbf_var',[])) self.param_traces = dict(self.param_traces) self.fopt_trace = [] @@ -156,29 +156,23 @@ class opt_SGD(Optimizer): Y = self.model.likelihood.Y samples = self.non_null_samples(self.model.likelihood.Y) self.model.N = samples.sum() - - if self.center: - self.model.likelihood._mean = Y[samples].mean() - self.model.likelihood._std = Y[samples].std() - - self.model.likelihood.set_data(Y[samples]) + Y = Y[samples] else: samples = self.model.likelihood.Y.nonzero()[0] self.model.N = len(samples) Y = np.asarray(self.model.likelihood.Y[samples].todense(), dtype = np.float64) - if self.center: - self.model.likelihood._mean = Y.mean() - self.model.likelihood._std = Y.std() - self.model.likelihood.set_data(Y) + if self.model.N == 0 or Y.std() == 0.0: + return 0, step, self.model.N + + # FIXME: get rid of self.center, everything should be centered by default + 
self.model.likelihood._mean = Y.mean() + self.model.likelihood._std = Y.std() + self.model.likelihood.set_data(Y) - # self.model.likelihood.N = self.model.N j = self.subset_parameter_vector(self.x_opt, samples, shapes) self.model.X = X[samples] - if self.model.N == 0 or self.model.likelihood.Y.std() == 0.0: - return 0, step, self.model.N - # if self.center: # self.model.likelihood.Y -= self.model.likelihood.Y.mean() # self.model.likelihood.Y /= self.model.likelihood.Y.std() @@ -186,7 +180,8 @@ class opt_SGD(Optimizer): model_name = self.model.__class__.__name__ if model_name == 'Bayesian_GPLVM': - self.model.likelihood.trYYT = np.sum(np.square(self.model.likelihood.Y)) + self.model.likelihood.YYT = np.dot(self.model.likelihood.Y, self.model.likelihood.Y.T) + self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT) b, p = self.shift_constraints(j) f, fp = f_fp(self.x_opt[j]) @@ -196,6 +191,7 @@ class opt_SGD(Optimizer): step[j] = self.momentum * step[j] + self.learning_rate[j] * fp self.x_opt[j] -= step[j] + self.restore_constraints(b, p) return f, step, self.model.N @@ -256,14 +252,13 @@ class opt_SGD(Optimizer): sys.stdout.write(status) sys.stdout.flush() last_printed_count = count - + self.param_traces['noise'].append(noise) NLL.append(f) self.fopt_trace.append(f) - for k in self.param_traces.keys(): - self.param_traces[k].append(self.model.get(k)[0]) - + # for k in self.param_traces.keys(): + # self.param_traces[k].append(self.model.get(k)[0]) # should really be a sum(), but earlier samples in the iteration will have a very crappy ll self.f_opt = np.mean(NLL) diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py index be45fa70..7d3b1737 100644 --- a/GPy/kern/kern.py +++ b/GPy/kern/kern.py @@ -51,6 +51,27 @@ class kern(parameterised): parameterised.__init__(self) + + def plot_ARD(self): + """ + If an ARD kernel is present, it bar-plots the ARD parameters + + + """ + for p in self.parts: + if hasattr(p, 'ARD') and p.ARD: + pb.figure() + pb.title('ARD parameters, %s kernel' % p.name) + + if p.name == 'linear': + ard_params = p.variances + else: + ard_params = 1./p.lengthscale + + pb.bar(np.arange(len(ard_params))-0.4, ard_params) + + + def _transform_gradients(self,g): x = self._get_params() g[self.constrained_positive_indices] = g[self.constrained_positive_indices]*x[self.constrained_positive_indices] From c9709cf4dac56146246b9f703537df86b6c7a4cf Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Mon, 25 Mar 2013 18:02:42 +0000 Subject: [PATCH 12/18] fixed small bug in SGD --- GPy/inference/SGD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py index dd402d5e..4c7f6be2 100644 --- a/GPy/inference/SGD.py +++ b/GPy/inference/SGD.py @@ -247,7 +247,7 @@ class opt_SGD(Optimizer): if self.messages == 2: - noise = np.exp(self.x_opt)[-1] + noise = self.model.likelihood._variance status = "evaluating {feature: 5d}/{tot: 5d} \t f: {f: 2.3f} \t non-missing: {nm: 4d}\t noise: {noise: 2.4f}\r".format(feature = count, tot = len(features), f = f, nm = Nj, noise = noise) sys.stdout.write(status) sys.stdout.flush() From db895209ca3f898473cd912291aa81e74e4b1712 Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Tue, 26 Mar 2013 11:49:20 +0000 Subject: [PATCH 13/18] added (optional) iter param dump --- GPy/inference/SGD.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py index 4c7f6be2..13a325b0 100644 --- a/GPy/inference/SGD.py +++ b/GPy/inference/SGD.py @@ -4,8 +4,7 
@@ import scipy.sparse from optimization import Optimizer from scipy import linalg, optimize import pylab as plt -import copy -import sys +import copy, sys, pickle class opt_SGD(Optimizer): """ @@ -19,7 +18,7 @@ class opt_SGD(Optimizer): """ - def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, **kwargs): + def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, **kwargs): self.opt_name = "Stochastic Gradient Descent" self.model = model @@ -33,6 +32,7 @@ class opt_SGD(Optimizer): self.self_paced = self_paced self.center = center self.param_traces = [('noise',[])] + self.iteration_file = iteration_file # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1: # self.param_traces.append(('bias',[])) # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1: @@ -271,8 +271,18 @@ class opt_SGD(Optimizer): # self.model.Youter = np.dot(Y, Y.T) self.trace.append(self.f_opt) + if self.iteration_file is not None: + f = open(self.iteration_file + "iteration%d.pickle" % it, 'w') + data = [self.x_opt, self.fopt_trace, self.param_traces] + pickle.dump(data, f) + f.close() + if self.messages != 0: sys.stdout.write('\r' + ' '*len(status)*2 + ' \r') status = "SGD Iteration: {0: 3d}/{1: 3d} f: {2: 2.3f}\n".format(it+1, self.iterations, self.f_opt) sys.stdout.write(status) sys.stdout.flush() + + + + From f2b49fe69aeabca41f8994badde135362021b21c Mon Sep 17 00:00:00 2001 From: Nicolas Date: Wed, 27 Mar 2013 12:45:08 +0000 Subject: [PATCH 14/18] Bug fixed in periodic kernels: Warning were not handled properly --- GPy/kern/periodic_Matern52.py | 1 + GPy/kern/periodic_exponential.py | 1 + 2 files changed, 2 insertions(+) diff --git a/GPy/kern/periodic_Matern52.py b/GPy/kern/periodic_Matern52.py index 07cb11ea..1e55ab62 100644 --- a/GPy/kern/periodic_Matern52.py +++ b/GPy/kern/periodic_Matern52.py @@ -53,6 +53,7 @@ class periodic_Matern52(kernpart): psi = np.where(r1 != 0, (np.arctan(r2/r1) + (r1<0.)*np.pi),np.arcsin(r2)) return r,omega[:,0:1], psi + @silence_errors def _int_computation(self,r1,omega1,phi1,r2,omega2,phi2): Gint1 = 1./(omega1+omega2.T)*( np.sin((omega1+omega2.T)*self.upper+phi1+phi2.T) - np.sin((omega1+omega2.T)*self.lower+phi1+phi2.T)) + 1./(omega1-omega2.T)*( np.sin((omega1-omega2.T)*self.upper+phi1-phi2.T) - np.sin((omega1-omega2.T)*self.lower+phi1-phi2.T) ) Gint2 = 1./(omega1+omega2.T)*( np.sin((omega1+omega2.T)*self.upper+phi1+phi2.T) - np.sin((omega1+omega2.T)*self.lower+phi1+phi2.T)) + np.cos(phi1-phi2.T)*(self.upper-self.lower) diff --git a/GPy/kern/periodic_exponential.py b/GPy/kern/periodic_exponential.py index 0018a8f9..50575ca9 100644 --- a/GPy/kern/periodic_exponential.py +++ b/GPy/kern/periodic_exponential.py @@ -53,6 +53,7 @@ class periodic_exponential(kernpart): psi = np.where(r1 != 0, (np.arctan(r2/r1) + (r1<0.)*np.pi),np.arcsin(r2)) return r,omega[:,0:1], psi + @silence_errors def _int_computation(self,r1,omega1,phi1,r2,omega2,phi2): Gint1 = 1./(omega1+omega2.T)*( np.sin((omega1+omega2.T)*self.upper+phi1+phi2.T) - np.sin((omega1+omega2.T)*self.lower+phi1+phi2.T)) + 1./(omega1-omega2.T)*( np.sin((omega1-omega2.T)*self.upper+phi1-phi2.T) - np.sin((omega1-omega2.T)*self.lower+phi1-phi2.T) ) Gint2 = 1./(omega1+omega2.T)*( np.sin((omega1+omega2.T)*self.upper+phi1+phi2.T) - 
np.sin((omega1+omega2.T)*self.lower+phi1+phi2.T)) + np.cos(phi1-phi2.T)*(self.upper-self.lower) From c7a58acd8f25520dd4b15d214be968097d743706 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Wed, 3 Apr 2013 14:24:55 +0100 Subject: [PATCH 15/18] Added testing to modules --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ef5ff58d..ee5f380c 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup(name = 'GPy', license = "BSD 3-clause", keywords = "machine-learning gaussian-processes kernels", url = "http://sheffieldml.github.com/GPy/", - packages = ['GPy', 'GPy.core', 'GPy.kern', 'GPy.util', 'GPy.models', 'GPy.inference', 'GPy.examples', 'GPy.likelihoods'], + packages = ['GPy', 'GPy.core', 'GPy.kern', 'GPy.util', 'GPy.models', 'GPy.inference', 'GPy.examples', 'GPy.likelihoods', 'GPy.testing'], package_dir={'GPy': 'GPy'}, package_data = {'GPy': ['GPy/examples']}, py_modules = ['GPy.__init__'], From 98ade75661c192e9f0423e61ec8eb780d74f7653 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Wed, 10 Apr 2013 12:14:58 +0100 Subject: [PATCH 16/18] changed version number in setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ee5f380c..c5f5c8dc 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ import os from setuptools import setup # Version number -version = '0.2' +version = '0.3' def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() From e1d0965866e49b82411092b4d880aaaa466bad0b Mon Sep 17 00:00:00 2001 From: James Hensman Date: Wed, 10 Apr 2013 12:15:23 +0100 Subject: [PATCH 17/18] changed version number in setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index c5f5c8dc..2f7c9af8 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ import os from setuptools import setup # Version number -version = '0.3' +version = '0.3.2' def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() From 74c0b148927690b5118858e899ff5a54d04426bd Mon Sep 17 00:00:00 2001 From: Nicolo Fusi Date: Wed, 10 Apr 2013 12:28:38 +0100 Subject: [PATCH 18/18] merged local branch --- GPy/examples/warped_GP_demo.py | 52 ---------------------------------- 1 file changed, 52 deletions(-) delete mode 100644 GPy/examples/warped_GP_demo.py diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py deleted file mode 100644 index 8250caca..00000000 --- a/GPy/examples/warped_GP_demo.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
-# Licensed under the BSD 3-clause license (see LICENSE.txt) - - -import numpy as np -import scipy as sp -import pdb, sys, pickle -import matplotlib.pylab as plt -import GPy -np.random.seed(2) - -N = 120 -# sample inputs and outputs -X = np.random.uniform(-np.pi,np.pi,(N,1)) -Y = np.sin(X)+np.random.randn(N,1)*0.05 -Y += np.abs(Y.min()) + 0.5 -Z = np.exp(Y)#Y**(1/3.0) -Zmax = Z.max() -Zmin = Z.min() -Z = (Z-Zmin)/(Zmax-Zmin) - 0.5 -train = range(X.shape[0])[:100] -test = range(X.shape[0])[100:] - -kernel = GPy.kern.rbf(1) + GPy.kern.bias(1) -m = GPy.models.warpedGP(X[train], Z[train], kernel=kernel, warping_terms = 2) -m.constrain_positive('(tanh_a|tanh_b|rbf|noise|bias)') -m.constrain_fixed('tanh_d', 1.0) -m.randomize() -plt.figure() -plt.xlabel('predicted f(Z)') -plt.ylabel('actual f(Z)') -plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'before training') -m.optimize(messages = True) -# m.optimize_restarts(4, parallel = True, messages = True) -plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'after training') -plt.legend(loc = 0) -m.plot_warping() -plt.figure() -plt.title('warped GP fit') -m.plot() -m.optimize(messages=1) -plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Y[test].flatten(), 'x'); plt.title('prediction in unwarped space') -m.predict_in_warped_space = True -plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Z[test].flatten(), 'x'); plt.title('prediction in warped space') - -m1 = GPy.models.GP_regression(X[train], Z[train]) -m1.constrain_positive('(rbf|noise|bias)') -m1.randomize() -m1.optimize(messages = True) -plt.figure() -plt.title('GP fit') -m1.plot()
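
A note on the warping function introduced in PATCH 01/18: the "term" added by that commit is the linear component d*y, so the full transformation is f(y) = d*y + sum_i a_i*tanh(b_i*(y + c_i)), inverted numerically by Newton-Raphson in f_inv(); the warped-GP log likelihood then picks up the log-Jacobian sum_i log f'(y_i), which is the np.log(jacobian).sum() term kept through PATCH 07/18. The sketch below is a self-contained illustration of that transform and its inverse, not code from the series: the flat parameter layout [a_1, b_1, c_1, ..., a_n, b_n, c_n, d] mirrors the patches, but the helper names, parameter values, and the iteration count are made up for the example.

    import numpy as np

    def warp(y, psi, n_terms):
        # f(y) = d*y + sum_i a_i * tanh(b_i * (y + c_i)), with the flat
        # layout [a_1, b_1, c_1, ..., a_n, b_n, c_n, d] as in the patch.
        d = psi[-1]
        abc = psi[:-1].reshape(n_terms, 3)
        z = d * y
        for a, b, c in abc:
            z = z + a * np.tanh(b * (y + c))
        return z

    def warp_grad(y, psi, n_terms):
        # df/dy = d + sum_i a_i * b_i * (1 - tanh(b_i*(y + c_i))**2)
        d = psi[-1]
        abc = psi[:-1].reshape(n_terms, 3)
        g = d * np.ones_like(y)
        for a, b, c in abc:
            g = g + a * b * (1.0 - np.tanh(b * (y + c)) ** 2)
        return g

    def warp_inv(z, psi, n_terms, iterations=30):
        # Newton-Raphson as in f_inv(): y <- y - (f(y) - z) / f'(y).
        # Needs f monotonic, i.e. a_i, b_i, d > 0.
        y = np.ones_like(z)
        for _ in range(iterations):
            y = y - (warp(y, psi, n_terms) - z) / warp_grad(y, psi, n_terms)
        return y

    # illustrative parameters: two tanh terms plus the linear slope d
    psi = np.array([0.5, 1.0, 0.0, 0.3, 2.0, 0.5, 1.0])
    y = np.linspace(-2.0, 2.0, 5)
    print(np.allclose(warp_inv(warp(y, psi, 2), psi, 2), y))  # True

Monotonicity (and hence invertibility) of f is the reason the demo constrains tanh_a, tanh_b and tanh_d to be positive.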
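On PATCHES 04/18 and 08/18: moving m.randomize() out of opt_wrapper() and into the submission loop matters because apply_async pickles the model at submission time, so randomizing before each call is what gives every restart a distinct starting point; the try/except added later makes Ctrl+C terminate the pool instead of leaving it hanging. Below is a minimal sketch of the combined pattern, assuming a model object with randomize() and optimize() methods and an optimization_runs list like GPy's; it is an illustration of the design, not the library code.

    import multiprocessing as mp

    def opt_wrapper(m, **kwargs):
        # Stand-in for GPy.util.misc.opt_wrapper after PATCH 04: it only
        # optimizes; randomization now happens before the job is submitted.
        m.optimize(**kwargs)
        return m.optimization_runs[-1]

    def optimize_restarts(model, n_restarts=4, num_processes=None, **kwargs):
        pool = mp.Pool(processes=num_processes)
        jobs = []
        try:
            for _ in range(n_restarts):
                model.randomize()  # each apply_async pickles the freshly randomized model
                jobs.append(pool.apply_async(opt_wrapper, args=(model,), kwds=kwargs))
            pool.close()   # signal that no more work is coming
            pool.join()    # wait for all restarts to complete
        except KeyboardInterrupt:
            print("Ctrl+C received, terminating and joining pool.")
            pool.terminate()
            pool.join()
        return [j.get() for j in jobs if j.ready() and j.successful()]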
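On the momentum change threaded through PATCHES 09/18 and 11/18: the old update computed update_t = lr*g_t + momentum*(lr*g_{t-1}), i.e. it only ever looked one step back, while the patched form step = momentum*step + lr*g folds the whole gradient history into an exponentially decaying sum (classical "heavy ball" momentum), so the two are not equivalent. A toy sketch of the patched rule on a one-dimensional quadratic; the objective and constants are illustrative only, not from the patch.

    import numpy as np

    def sgd_momentum(grad, x0, lr=0.1, momentum=0.9, iters=300):
        # step carries an exponentially decaying sum of past gradients,
        # matching the recurrence the patch introduces:
        #   step <- momentum * step + lr * grad(x);  x <- x - step
        x = x0.astype(float).copy()
        step = np.zeros_like(x)
        for _ in range(iters):
            step = momentum * step + lr * grad(x)
            x -= step
        return x

    grad = lambda x: 2.0 * (x - 3.0)            # gradient of (x - 3)**2
    print(sgd_momentum(grad, np.array([0.0])))  # converges to approx [3.]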