From bef114eabd592fc2b44bb823c0e8ec779f25306d Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Tue, 8 Dec 2015 13:59:46 +0000 Subject: [PATCH 01/28] (wpgs) fixing newton-raphson for f_inv and fixing plotting stuff --- GPy/models/warped_gp.py | 37 +++++++++++++++++++++--------- GPy/plotting/gpy_plot/plot_util.py | 8 ++++++- GPy/testing/model_tests.py | 29 ++++++++++------------- GPy/util/warping_functions.py | 6 ++++- 4 files changed, 50 insertions(+), 30 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index df947d3e..998c0ed6 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -96,11 +96,14 @@ class WarpedGP(GP): return arg1 - (arg2 ** 2) def predict(self, Xnew, which_parts='all', pred_init=None, full_cov=False, Y_metadata=None, - median=False, deg_gauss_hermite=100): - # normalize X values - # Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale + median=False, deg_gauss_hermite=100, likelihood=None): + """ + Prediction results depend on: + - The value of the self.predict_in_warped_space flag + - The median flag passed as argument + The likelihood keyword is never used, it is just to follow the plotting API. + """ mu, var = GP._raw_predict(self, Xnew) - # now push through likelihood mean, var = self.likelihood.predictive_values(mu, var) @@ -116,13 +119,11 @@ class WarpedGP(GP): else: wmean = mean wvar = var - if self.scale_data: pred = self._unscale_data(pred) - return wmean, wvar - def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None): + def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, likelihood=None, median=False): """ Get the predictive quantiles around the prediction at X @@ -137,15 +138,29 @@ class WarpedGP(GP): if self.normalizer is not None: m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v) a, b = self.likelihood.predictive_quantiles(m, v, quantiles, Y_metadata) - #return [a, b] if not self.predict_in_warped_space: return [a, b] - #print a.shape new_a = self.warping_function.f_inv(a) new_b = self.warping_function.f_inv(b) - return [new_a, new_b] - #return self.likelihood.predictive_quantiles(m, v, quantiles, Y_metadata) + + def log_predictive_density(self, x_test, y_test, Y_metadata=None): + """ + Calculation of the log predictive density + + .. 
math: + p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*}) + + :param x_test: test locations (x_{*}) + :type x_test: (Nx1) array + :param y_test: test observations (y_{*}) + :type y_test: (Nx1) array + :param Y_metadata: metadata associated with the test points + """ + mu_star, var_star = self._raw_predict(x_test) + fy = self.warping_function.f(y_test) + ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata) + return ll_lpd * self.warping_function.fgrad_y(y_test) if __name__ == '__main__': diff --git a/GPy/plotting/gpy_plot/plot_util.py b/GPy/plotting/gpy_plot/plot_util.py index 254886a2..8c5c04f1 100644 --- a/GPy/plotting/gpy_plot/plot_util.py +++ b/GPy/plotting/gpy_plot/plot_util.py @@ -31,6 +31,7 @@ import numpy as np from scipy import sparse import itertools +from GPy.models import WarpedGP def in_ipynb(): try: @@ -73,6 +74,9 @@ def helper_predict_with_model(self, Xgrid, plot_raw, apply_link, percentiles, wh if 'output_index' not in predict_kw['Y_metadata']: predict_kw['Y_metadata']['output_index'] = Xgrid[:,-1:].astype(np.int) + if isinstance(self, WarpedGP) and self.predict_in_warped_space: + predict_kw['median'] = True + mu, _ = self.predict(Xgrid, **predict_kw) if percentiles is not None: @@ -291,6 +295,8 @@ def get_x_y_var(model): Y = model.Y.values except AttributeError: Y = model.Y + if isinstance(model, WarpedGP) and model.predict_in_warped_space: + Y = model.Y_untransformed if sparse.issparse(Y): Y = Y.todense().view(np.ndarray) return X, X_variance, Y @@ -377,4 +383,4 @@ def x_frame2D(X,plot_limits=None,resolution=None): resolution = resolution or 50 xx, yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution] Xnew = np.vstack((xx.flatten(),yy.flatten())).T - return Xnew, xx, yy, xmin, xmax \ No newline at end of file + return Xnew, xx, yy, xmin, xmax diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 1212d746..988bde13 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -307,42 +307,37 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(preds, warp_preds) - @unittest.skip('Comment this to plot the modified sine function') + #@unittest.skip('Comment this to plot the modified sine function') def test_warped_gp_sine(self): """ A test replicating the sine regression problem from Snelson's paper. 
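        Reference: E. Snelson, C. E. Rasmussen and Z. Ghahramani,
        "Warped Gaussian Processes", Advances in Neural Information
        Processing Systems 16 (NIPS 2003).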
""" X = (2 * np.pi) * np.random.random(151) - np.pi - Y = np.sin(X) + np.random.normal(0,0.1,151) - Y = np.exp(Y) - 5 - #Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) + 0 + Y = np.sin(X) + np.random.normal(0,0.2,151) + Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) - #np.seterr(over='raise') import matplotlib.pyplot as plt warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.TanhWarpingFunction_d(n_terms=2) warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) - #warp_m['.*variance.*'].constrain_fixed(0.25) - #warp_m['.*lengthscale.*'].constrain_fixed(1) - #warp_m['warp_tanh.d'].constrain_fixed(1) - #warp_m.randomize() - #warp_m['.*warp_tanh.psi*'][:,0:2].constrain_bounded(0,100) - #warp_m['.*warp_tanh.psi*'][:,0:1].constrain_fixed(1) - - #print(warp_m.checkgrad()) - #warp_m.plot() - #plt.show() + warp_m['.*noise.variance.*'].constrain_fixed(0.1) - warp_m.optimize_restarts(parallel=True, robust=True) - #print(warp_m.checkgrad()) + m = GPy.models.GPRegression(X[:, None], Y[:, None]) + m.optimize_restarts(parallel=False, robust=True, num_restarts=5) + warp_m.optimize_restarts(parallel=False, robust=True, num_restarts=5) print(warp_m) print(warp_m['.*warp.*']) warp_m.predict_in_warped_space = False warp_m.plot() warp_m.predict_in_warped_space = True warp_m.plot() + m.plot() warp_f.plot(X.min()-10, X.max()+10) + + + + plt.show() diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 1617283c..516103bf 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -196,7 +196,11 @@ class TanhWarpingFunction_d(WarpingFunction): z = z.copy() if y is None: - y = np.ones_like(z) + # The idea here is to initialize y with +1 where + # z is positive and -1 where it is negative. + # For negative z, Newton-Raphson diverges + # if we initialize y with a positive value (and vice-versa). + y = ((z > 0) * 1.) 
- (z <= 0) it = 0 update = np.inf From 4c843adc4cbd05c8095ccdcf85b127cc12c856c4 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Tue, 8 Dec 2015 14:14:59 +0000 Subject: [PATCH 02/28] skipping the wgps Snelson's test (comment the skip line to see the plots) --- GPy/testing/model_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 988bde13..7f983411 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -307,7 +307,7 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(preds, warp_preds) - #@unittest.skip('Comment this to plot the modified sine function') + @unittest.skip('Comment this to plot the modified sine function') def test_warped_gp_sine(self): """ A test replicating the sine regression problem from From 24e9d68a19560033d040d860fb044e5d6bc18480 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Thu, 14 Jan 2016 15:28:48 +0000 Subject: [PATCH 03/28] stuff --- GPy/models/warped_gp.py | 2 +- GPy/testing/model_tests.py | 4 ++-- GPy/util/warping_functions.py | 9 +++++++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 998c0ed6..443454a1 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -160,7 +160,7 @@ class WarpedGP(GP): mu_star, var_star = self._raw_predict(x_test) fy = self.warping_function.f(y_test) ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata) - return ll_lpd * self.warping_function.fgrad_y(y_test) + return ll_lpd - np.log(self.warping_function.fgrad_y(y_test)) if __name__ == '__main__': diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 7f983411..d637a8c6 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -307,7 +307,7 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(preds, warp_preds) - @unittest.skip('Comment this to plot the modified sine function') + #@unittest.skip('Comment this to plot the modified sine function') def test_warped_gp_sine(self): """ A test replicating the sine regression problem from @@ -321,7 +321,6 @@ class MiscTests(unittest.TestCase): warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.TanhWarpingFunction_d(n_terms=2) warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) - warp_m['.*noise.variance.*'].constrain_fixed(0.1) m = GPy.models.GPRegression(X[:, None], Y[:, None]) m.optimize_restarts(parallel=False, robust=True, num_restarts=5) @@ -330,6 +329,7 @@ class MiscTests(unittest.TestCase): print(warp_m['.*warp.*']) warp_m.predict_in_warped_space = False warp_m.plot() + import ipdb; ipdb.set_trace() warp_m.predict_in_warped_space = True warp_m.plot() m.plot() diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 516103bf..ce99a940 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -152,7 +152,7 @@ class TanhWarpingFunction(WarpingFunction): class TanhWarpingFunction_d(WarpingFunction): - def __init__(self, n_terms=3): + def __init__(self, n_terms=3, initial_y=None): """n_terms specifies the number of tanh terms to be used""" self.n_terms = n_terms self.num_parameters = 3 * self.n_terms + 1 @@ -165,6 +165,7 @@ class TanhWarpingFunction_d(WarpingFunction): self.d = Param('%s' % ('d'), 1.0, Logexp()) self.link_parameter(self.psi) self.link_parameter(self.d) + self.initial_y = initial_y def f(self, y): """ @@ -187,7 +188,7 @@ class 
TanhWarpingFunction_d(WarpingFunction): z += a*np.tanh(b*(y+c)) return z - def f_inv(self, z, max_iterations=1000, y=None): + def f_inv(self, z, max_iterations=100, y=None): """ calculate the numerical inverse of f @@ -195,12 +196,15 @@ class TanhWarpingFunction_d(WarpingFunction): """ z = z.copy() + if y is None: # The idea here is to initialize y with +1 where # z is positive and -1 where it is negative. # For negative z, Newton-Raphson diverges # if we initialize y with a positive value (and vice-versa). y = ((z > 0) * 1.) - (z <= 0) + if self.initial_y is not None: + y *= self.initial_y it = 0 update = np.inf @@ -213,6 +217,7 @@ class TanhWarpingFunction_d(WarpingFunction): it += 1 if it == max_iterations: print("WARNING!!! Maximum number of iterations reached in f_inv ") + print("Sum of updates: %.4f" % np.sum(update)) return y def fgrad_y(self, y, return_precalc=False): From c129900768eb7e363c4095dbad7603bb606f92c2 Mon Sep 17 00:00:00 2001 From: Daniel Beck Date: Mon, 22 Feb 2016 19:57:54 +0000 Subject: [PATCH 04/28] added a log warping function --- GPy/models/warped_gp.py | 20 ++++++------ GPy/util/warping_functions.py | 60 ++++++++++++++++++++++++++++++++--- 2 files changed, 66 insertions(+), 14 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 443454a1..d040f2d4 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -44,17 +44,19 @@ class WarpedGP(GP): super(WarpedGP, self).parameters_changed() Kiy = self.posterior.woodbury_vector.flatten() + + self.warping_function.update_grads(self.Y_untransformed, Kiy) - grad_y = self.warping_function.fgrad_y(self.Y_untransformed) - grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, - return_covar_chain=True) - djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) - dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) + #grad_y = self.warping_function.fgrad_y(self.Y_untransformed) + #grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, + # return_covar_chain=True) + #djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) + #dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) - warping_grads = -dquad_dpsi + djac_dpsi + #warping_grads = -dquad_dpsi + djac_dpsi - self.warping_function.psi.gradient[:] = warping_grads[:, :-1] - self.warping_function.d.gradient[:] = warping_grads[0, -1] + #self.warping_function.psi.gradient[:] = warping_grads[:, :-1] + #self.warping_function.d.gradient[:] = warping_grads[0, -1] def transform_data(self): Y = self.warping_function.f(self.Y_untransformed.copy()).copy() @@ -160,7 +162,7 @@ class WarpedGP(GP): mu_star, var_star = self._raw_predict(x_test) fy = self.warping_function.f(y_test) ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata) - return ll_lpd - np.log(self.warping_function.fgrad_y(y_test)) + return ll_lpd + np.log(self.warping_function.fgrad_y(y_test)) if __name__ == '__main__': diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index ce99a940..88625757 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -278,6 +278,52 @@ class TanhWarpingFunction_d(WarpingFunction): names.append('warp_tanh_d') return names + def update_grads(self, Y_untransformed, Kiy): + grad_y = self.fgrad_y(Y_untransformed) + grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed, + return_covar_chain=True) + djac_dpsi = ((1.0 / grad_y[:, 
:, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) + dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) + + warping_grads = -dquad_dpsi + djac_dpsi + + self.psi.gradient[:] = warping_grads[:, :-1] + self.d.gradient[:] = warping_grads[0, -1] + + +class LogFunction(WarpingFunction): + """ + Easy wrapper for applying a fixed warping function to + positive-only values. + """ + def __init__(self): + self.num_parameters = 0 + #self.psi = Param('psi', np.zeros((1,3))) + #self.d = Param('%s' % ('d'), 0.0, Logexp()) + super(LogFunction, self).__init__(name='log') + #self.link_parameter(self.psi) + #self.link_parameter(self.d) + + + def f(self, y): + return np.log(y) + + def fgrad_y(self, y): + return 1. / y + + def update_grads(self, Y_untransformed, Kiy): + pass + + def fgrad_y_psi(self, y, return_covar_chain=False): + gradients = np.zeros((y.shape[0], y.shape[1], len(self.psi), 4)) + gradients = 0 + if return_covar_chain: + return gradients, gradients + return gradients + + def f_inv(self, z, y=None): + return np.exp(z) + class IdentityFunction(WarpingFunction): """ @@ -285,12 +331,12 @@ class IdentityFunction(WarpingFunction): and should not be used in practice. """ def __init__(self): - self.num_parameters = 4 - self.psi = Param('psi', np.zeros((1,3))) - self.d = Param('%s' % ('d'), 1.0, Logexp()) + self.num_parameters = 0 + #self.psi = Param('psi', np.zeros((1,3))) + #self.d = Param('%s' % ('d'), 0.0, Logexp()) super(IdentityFunction, self).__init__(name='identity') - self.link_parameter(self.psi) - self.link_parameter(self.d) + #self.link_parameter(self.psi) + #self.link_parameter(self.d) def f(self, y): @@ -299,8 +345,12 @@ class IdentityFunction(WarpingFunction): def fgrad_y(self, y): return np.ones(y.shape) + def update_grads(self, Y_untransformed, Kiy): + pass + def fgrad_y_psi(self, y, return_covar_chain=False): gradients = np.zeros((y.shape[0], y.shape[1], len(self.psi), 4)) + gradients = 0 if return_covar_chain: return gradients, gradients return gradients From 03e59b764d7cdfbb9a629ea25949dabbf10fa664 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 11:07:31 +0000 Subject: [PATCH 05/28] cleaning on warping functions --- GPy/util/warping_functions.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 88625757..147ffda4 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -153,7 +153,9 @@ class TanhWarpingFunction(WarpingFunction): class TanhWarpingFunction_d(WarpingFunction): def __init__(self, n_terms=3, initial_y=None): - """n_terms specifies the number of tanh terms to be used""" + """ + n_terms specifies the number of tanh terms to be used + """ self.n_terms = n_terms self.num_parameters = 3 * self.n_terms + 1 self.psi = np.ones((self.n_terms, 3)) @@ -298,12 +300,7 @@ class LogFunction(WarpingFunction): """ def __init__(self): self.num_parameters = 0 - #self.psi = Param('psi', np.zeros((1,3))) - #self.d = Param('%s' % ('d'), 0.0, Logexp()) super(LogFunction, self).__init__(name='log') - #self.link_parameter(self.psi) - #self.link_parameter(self.d) - def f(self, y): return np.log(y) @@ -315,11 +312,9 @@ class LogFunction(WarpingFunction): pass def fgrad_y_psi(self, y, return_covar_chain=False): - gradients = np.zeros((y.shape[0], y.shape[1], len(self.psi), 4)) - gradients = 0 if return_covar_chain: - return gradients, gradients - return gradients + return 0, 0 + return 0 def f_inv(self, z, y=None): 
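+        # Closed-form inverse: since f(y) = log(y), the inverse is
+        # simply exp(z); no Newton-Raphson iteration is needed here.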
return np.exp(z) @@ -332,12 +327,7 @@ class IdentityFunction(WarpingFunction): """ def __init__(self): self.num_parameters = 0 - #self.psi = Param('psi', np.zeros((1,3))) - #self.d = Param('%s' % ('d'), 0.0, Logexp()) super(IdentityFunction, self).__init__(name='identity') - #self.link_parameter(self.psi) - #self.link_parameter(self.d) - def f(self, y): return y @@ -349,11 +339,9 @@ class IdentityFunction(WarpingFunction): pass def fgrad_y_psi(self, y, return_covar_chain=False): - gradients = np.zeros((y.shape[0], y.shape[1], len(self.psi), 4)) - gradients = 0 if return_covar_chain: - return gradients, gradients - return gradients + return 0, 0 + return 0 def f_inv(self, z, y=None): return z From 8deac1d05869661ede6d9325b30ef70dfd586f27 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 11:17:22 +0000 Subject: [PATCH 06/28] cleaning on warping functions --- GPy/util/warping_functions.py | 42 +++++++++++++++-------------------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 147ffda4..a2f0dfa8 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -174,20 +174,14 @@ class TanhWarpingFunction_d(WarpingFunction): Transform y with f using parameter vector psi psi = [[a,b,c]] - :math:`f = \\sum_{terms} a * tanh(b*(y+c))` + :math:`f = (y * d) + \\sum_{terms} a * tanh(b *(y + c))` """ - #1. check that number of params is consistent - # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions' - # assert psi.shape[1] == 4, 'inconsistent parameter dimensions' - d = self.d mpsi = self.psi - - #3. transform data - z = d*y.copy() + z = d * y.copy() for i in range(len(mpsi)): - a,b,c = mpsi[i] - z += a*np.tanh(b*(y+c)) + a, b, c = mpsi[i] + z += a * np.tanh(b * (y + c)) return z def f_inv(self, z, max_iterations=100, y=None): @@ -214,7 +208,7 @@ class TanhWarpingFunction_d(WarpingFunction): while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations): fy = self.f(y) fgrady = self.fgrad_y(y) - update = (fy - z)/fgrady + update = (fy - z) / fgrady y -= update it += 1 if it == max_iterations: @@ -226,17 +220,18 @@ class TanhWarpingFunction_d(WarpingFunction): """ gradient of f w.r.t to y ([N x 1]) - :returns: Nx1 vector of derivatives, unless return_precalc is true, then it also returns the precomputed stuff + :returns: Nx1 vector of derivatives, unless return_precalc is true, + then it also returns the precomputed stuff """ d = self.d mpsi = self.psi # vectorized version - S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T + S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T R = np.tanh(S) - D = 1-R**2 + D = 1 - (R ** 2) - GRAD = (d + (mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T + GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T if return_precalc: return GRAD, S, R, D @@ -255,28 +250,27 @@ class TanhWarpingFunction_d(WarpingFunction): gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) for i in range(len(mpsi)): a,b,c = mpsi[i] - gradients[:,:,i,0] = (b*(1.0/np.cosh(s[i]))**2).T - gradients[:,:,i,1] = a*(d[i] - 2.0*s[i]*r[i]*(1.0/np.cosh(s[i]))**2).T - gradients[:,:,i,2] = (-2.0*a*(b**2)*r[i]*((1.0/np.cosh(s[i]))**2)).T - gradients[:,:,0,3] = 1.0 + gradients[:, :, i, 0] = (b * (1.0/np.cosh(s[i])) ** 2).T + gradients[:, :, i, 1] = a * (d[i] - 2.0 * s[i] * r[i] * (1.0/np.cosh(s[i])) ** 2).T + gradients[:, :, i, 2] = (-2.0 * a * (b ** 2) * r[i] * ((1.0 / np.cosh(s[i])) ** 2)).T + gradients[:, :, 0, 3] = 1.0 if 
return_covar_chain: covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) - for i in range(len(mpsi)): a,b,c = mpsi[i] covar_grad_chain[:, :, i, 0] = (r[i]).T - covar_grad_chain[:, :, i, 1] = (a*(y + c) * ((1.0/np.cosh(s[i]))**2).T) - covar_grad_chain[:, :, i, 2] = a*b*((1.0/np.cosh(s[i]))**2).T + covar_grad_chain[:, :, i, 1] = (a * (y + c) * ((1.0 / np.cosh(s[i])) ** 2).T) + covar_grad_chain[:, :, i, 2] = a * b * ((1.0 / np.cosh(s[i])) ** 2).T covar_grad_chain[:, :, 0, 3] = y - return gradients, covar_grad_chain return gradients def _get_param_names(self): variables = ['a', 'b', 'c', 'd'] - names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[]) + names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] + for q in range(self.n_terms)],[]) names.append('warp_tanh_d') return names From 9039fae29e5fce00c5fd49beedc0fc041f2a7ef6 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 11:24:31 +0000 Subject: [PATCH 07/28] deleted old tanh_warp and renamed warp_tanh_d to warp_tanh --- GPy/models/warped_gp.py | 4 +- GPy/testing/model_tests.py | 2 +- GPy/util/warping_functions.py | 105 +--------------------------------- 3 files changed, 5 insertions(+), 106 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index d040f2d4..50be6467 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -5,7 +5,7 @@ import numpy as np from ..util.warping_functions import * from ..core import GP from .. import likelihoods -from GPy.util.warping_functions import TanhWarpingFunction_d +from GPy.util.warping_functions import TanhWarpingFunction from GPy import kern class WarpedGP(GP): @@ -15,7 +15,7 @@ class WarpedGP(GP): kernel = kern.RBF(X.shape[1]) if warping_function == None: - self.warping_function = TanhWarpingFunction_d(warping_terms) + self.warping_function = TanhWarpingFunction(warping_terms) self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1) * 1) else: self.warping_function = warping_function diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index d637a8c6..3ced78f2 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -319,7 +319,7 @@ class MiscTests(unittest.TestCase): import matplotlib.pyplot as plt warp_k = GPy.kern.RBF(1) - warp_f = GPy.util.warping_functions.TanhWarpingFunction_d(n_terms=2) + warp_f = GPy.util.warping_functions.TanhWarpingFunction(n_terms=2) warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) m = GPy.models.GPRegression(X[:, None], Y[:, None]) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index a2f0dfa8..dd41e5ee 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -51,107 +51,6 @@ class WarpingFunction(Parameterized): class TanhWarpingFunction(WarpingFunction): - def __init__(self, n_terms=3): - """n_terms specifies the number of tanh terms to be used""" - self.n_terms = n_terms - self.num_parameters = 3 * self.n_terms - super(TanhWarpingFunction, self).__init__(name='warp_tanh') - - def f(self, y, psi): - """ - transform y with f using parameter vector psi - psi = [[a,b,c]] - ::math::`f = \\sum_{terms} a * tanh(b*(y+c))` - """ - #1. check that number of params is consistent - assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions' - assert psi.shape[1] == 3, 'inconsistent parameter dimensions' - - #2. exponentiate the a and b (positive!) - mpsi = psi.copy() - - #3. 
transform data - z = y.copy() - for i in range(len(mpsi)): - a,b,c = mpsi[i] - z += a*np.tanh(b*(y+c)) - return z - - def f_inv(self, y, psi, iterations=10): - """ - calculate the numerical inverse of f - :param iterations: number of N.R. iterations - """ - - y = y.copy() - z = np.ones_like(y) - for i in range(iterations): - z -= (self.f(z, psi) - y)/self.fgrad_y(z,psi) - return z - - def fgrad_y(self, y, psi, return_precalc=False): - """ - gradient of f w.r.t to y ([N x 1]) - returns: Nx1 vector of derivatives, unless return_precalc is true, - then it also returns the precomputed stuff - """ - - mpsi = psi.copy() - - # vectorized version - - # S = (mpsi[:,1]*(y + mpsi[:,2])).T - S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T - R = np.tanh(S) - D = 1-R**2 - - # GRAD = (1+(mpsi[:,0:1]*mpsi[:,1:2]*D).sum(axis=0))[:,np.newaxis] - GRAD = (1+(mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T - - if return_precalc: - # return GRAD,S.sum(axis=1),R.sum(axis=1),D.sum(axis=1) - return GRAD, S, R, D - - return GRAD - - def fgrad_y_psi(self, y, psi, return_covar_chain=False): - """ - gradient of f w.r.t to y and psi - returns: NxIx3 tensor of partial derivatives - """ - - # 1. exponentiate the a and b (positive!) - mpsi = psi.copy() - w, s, r, d = self.fgrad_y(y, psi, return_precalc = True) - - gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 3)) - for i in range(len(mpsi)): - a,b,c = mpsi[i] - gradients[:,:,i,0] = (b*(1.0/np.cosh(s[i]))**2).T - gradients[:,:,i,1] = a*(d[i] - 2.0*s[i]*r[i]*(1.0/np.cosh(s[i]))**2).T - gradients[:,:,i,2] = (-2.0*a*(b**2)*r[i]*((1.0/np.cosh(s[i]))**2)).T - - if return_covar_chain: - covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 3)) - - for i in range(len(mpsi)): - a,b,c = mpsi[i] - covar_grad_chain[:, :, i, 0] = (r[i]).T - covar_grad_chain[:, :, i, 1] = (a*(y + c) * ((1.0/np.cosh(s[i]))**2).T) - covar_grad_chain[:, :, i, 2] = a*b*((1.0/np.cosh(s[i]))**2).T - - return gradients, covar_grad_chain - - return gradients - - def _get_param_names(self): - variables = ['a', 'b', 'c'] - names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[]) - return names - - -class TanhWarpingFunction_d(WarpingFunction): - def __init__(self, n_terms=3, initial_y=None): """ n_terms specifies the number of tanh terms to be used @@ -160,7 +59,7 @@ class TanhWarpingFunction_d(WarpingFunction): self.num_parameters = 3 * self.n_terms + 1 self.psi = np.ones((self.n_terms, 3)) - super(TanhWarpingFunction_d, self).__init__(name='warp_tanh') + super(TanhWarpingFunction, self).__init__(name='warp_tanh') self.psi = Param('psi', self.psi) self.psi[:, :2].constrain_positive() @@ -271,7 +170,7 @@ class TanhWarpingFunction_d(WarpingFunction): variables = ['a', 'b', 'c', 'd'] names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[]) - names.append('warp_tanh_d') + names.append('warp_tanh') return names def update_grads(self, Y_untransformed, Kiy): From 50f72f02943f9ee476c746cdbc7d23725ff0f319 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 11:28:05 +0000 Subject: [PATCH 08/28] some cleaning on warped gp --- GPy/models/warped_gp.py | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 50be6467..3e5733eb 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -9,17 +9,18 @@ from GPy.util.warping_functions import TanhWarpingFunction from GPy import kern 
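# A minimal usage sketch (illustrative toy data, not part of this patch):
#
#     import numpy as np
#     import GPy
#     X = np.random.uniform(-3., 3., (100, 1))
#     Y = np.exp(np.sin(X) + 0.1 * np.random.randn(100, 1))
#     m = GPy.models.WarpedGP(X, Y, warping_terms=2)
#     m.optimize()
#     mean, var = m.predict(X, median=True)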
class WarpedGP(GP): + """ + This defines a GP Regression model that applies a + warping function to the output. + """ def __init__(self, X, Y, kernel=None, warping_function=None, warping_terms=3): - if kernel is None: kernel = kern.RBF(X.shape[1]) - if warping_function == None: self.warping_function = TanhWarpingFunction(warping_terms) self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1) * 1) else: self.warping_function = warping_function - self.scale_data = False if self.scale_data: Y = self._scale_data(Y) @@ -27,7 +28,6 @@ class WarpedGP(GP): self.Y_untransformed = Y.copy() self.predict_in_warped_space = True likelihood = likelihoods.Gaussian() - GP.__init__(self, X, self.transform_data(), likelihood=likelihood, kernel=kernel) self.link_parameter(self.warping_function) @@ -40,29 +40,22 @@ class WarpedGP(GP): return (Y + 0.5) * (self._Ymax - self._Ymin) + self._Ymin def parameters_changed(self): + """ + Notice that we update the warping function gradients here. + """ self.Y[:] = self.transform_data() super(WarpedGP, self).parameters_changed() - Kiy = self.posterior.woodbury_vector.flatten() - self.warping_function.update_grads(self.Y_untransformed, Kiy) - #grad_y = self.warping_function.fgrad_y(self.Y_untransformed) - #grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, - # return_covar_chain=True) - #djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) - #dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) - - #warping_grads = -dquad_dpsi + djac_dpsi - - #self.warping_function.psi.gradient[:] = warping_grads[:, :-1] - #self.warping_function.d.gradient[:] = warping_grads[0, -1] - def transform_data(self): Y = self.warping_function.f(self.Y_untransformed.copy()).copy() return Y def log_likelihood(self): + """ + Notice we add the jacobian of the warping function here. + """ ll = GP.log_likelihood(self) jacobian = self.warping_function.fgrad_y(self.Y_untransformed) return ll + np.log(jacobian).sum() @@ -148,7 +141,8 @@ class WarpedGP(GP): def log_predictive_density(self, x_test, y_test, Y_metadata=None): """ - Calculation of the log predictive density + Calculation of the log predictive density. Notice we add + the jacobian of the warping function here. .. math: p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*}) From b266b578cd18b538363b362991f5503894ce4d21 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 11:29:41 +0000 Subject: [PATCH 09/28] renamed TanhWarpingFunction to TanhFunction --- GPy/models/warped_gp.py | 4 ++-- GPy/testing/model_tests.py | 2 +- GPy/util/warping_functions.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 3e5733eb..d35c1a15 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -5,7 +5,7 @@ import numpy as np from ..util.warping_functions import * from ..core import GP from .. 
import likelihoods -from GPy.util.warping_functions import TanhWarpingFunction +from GPy.util.warping_functions import TanhFunction from GPy import kern class WarpedGP(GP): @@ -17,7 +17,7 @@ class WarpedGP(GP): if kernel is None: kernel = kern.RBF(X.shape[1]) if warping_function == None: - self.warping_function = TanhWarpingFunction(warping_terms) + self.warping_function = TanhFunction(warping_terms) self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1) * 1) else: self.warping_function = warping_function diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 3ced78f2..ab88d011 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -319,7 +319,7 @@ class MiscTests(unittest.TestCase): import matplotlib.pyplot as plt warp_k = GPy.kern.RBF(1) - warp_f = GPy.util.warping_functions.TanhWarpingFunction(n_terms=2) + warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) m = GPy.models.GPRegression(X[:, None], Y[:, None]) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index dd41e5ee..5bccbd91 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -49,7 +49,7 @@ class WarpingFunction(Parameterized): plt.show() -class TanhWarpingFunction(WarpingFunction): +class TanhFunction(WarpingFunction): def __init__(self, n_terms=3, initial_y=None): """ @@ -59,7 +59,7 @@ class TanhWarpingFunction(WarpingFunction): self.num_parameters = 3 * self.n_terms + 1 self.psi = np.ones((self.n_terms, 3)) - super(TanhWarpingFunction, self).__init__(name='warp_tanh') + super(TanhFunction, self).__init__(name='warp_tanh') self.psi = Param('psi', self.psi) self.psi[:, :2].constrain_positive() From b3f8f263dac0e07ec09f4f3f229a5bdcb50d7b35 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 11:32:32 +0000 Subject: [PATCH 10/28] doctest on TanhFunction --- GPy/util/warping_functions.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 5bccbd91..e8c99540 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -50,7 +50,12 @@ class WarpingFunction(Parameterized): class TanhFunction(WarpingFunction): - + """ + This is the function proposed in Snelson et al.: + A sum of tanh functions with linear trends outside + the range. Notice the term 'd', which scales the + linear trend. 
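+    :math:`f(y) = (y * d) + \\sum_{terms} a * tanh(b * (y + c))`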
+ """ def __init__(self, n_terms=3, initial_y=None): """ n_terms specifies the number of tanh terms to be used @@ -58,11 +63,9 @@ class TanhFunction(WarpingFunction): self.n_terms = n_terms self.num_parameters = 3 * self.n_terms + 1 self.psi = np.ones((self.n_terms, 3)) - super(TanhFunction, self).__init__(name='warp_tanh') self.psi = Param('psi', self.psi) self.psi[:, :2].constrain_positive() - self.d = Param('%s' % ('d'), 1.0, Logexp()) self.link_parameter(self.psi) self.link_parameter(self.d) From 4dc6f0ec6c5e025642910dc5a9597a2b3d14577b Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 13:08:52 +0000 Subject: [PATCH 11/28] first try on logistic function --- GPy/util/warping_functions.py | 150 ++++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index e8c99540..10fddb50 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -241,3 +241,153 @@ class IdentityFunction(WarpingFunction): def f_inv(self, z, y=None): return z + + +class LogisticFunction(WarpingFunction): + """ + A sum of logistic functions. + """ + def __init__(self, n_terms=3, initial_y=None): + """ + n_terms specifies the number of logistic terms to be used + """ + self.n_terms = n_terms + self.num_parameters = 3 * self.n_terms + 1 + self.psi = np.ones((self.n_terms, 3)) + super(LogisticFunction, self).__init__(name='warp_logistic') + self.psi = Param('psi', self.psi) + self.psi[:, :2].constrain_positive() + self.d = Param('%s' % ('d'), 1.0, Logexp()) + self.link_parameter(self.psi) + self.link_parameter(self.d) + self.initial_y = initial_y + + def _logistic(self, x): + return 1. / (1 + np.exp(-x)) + + def f(self, y): + """ + Transform y with f using parameter vector psi + psi = [[a,b,c]] + + :math:`f = (y * d) + \\sum_{terms} a * logistic(b *(y + c))` + """ + d = self.d + mpsi = self.psi + z = d * y.copy() + for i in xrange(self.n_terms): + a, b, c = mpsi[i] + z += a * self._logistic(b * (y + c)) + return z + + def f_inv(self, z, max_iterations=100, y=None): + """ + calculate the numerical inverse of f + + :param max_iterations: maximum number of N.R. iterations + """ + z = z.copy() + if y is None: + # The idea here is to initialize y with +1 where + # z is positive and -1 where it is negative. + # For negative z, Newton-Raphson diverges + # if we initialize y with a positive value (and vice-versa). + y = ((z > 0) * 1.) - (z <= 0) + if self.initial_y is not None: + y *= self.initial_y + it = 0 + update = np.inf + while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations): + fy = self.f(y) + fgrady = self.fgrad_y(y) + update = (fy - z) / fgrady + y -= update + it += 1 + if it == max_iterations: + print("WARNING!!! Maximum number of iterations reached in f_inv ") + print("Sum of updates: %.4f" % np.sum(update)) + return y + + def fgrad_y(self, y, return_precalc=False): + """ + Gradient of f w.r.t to y ([N x 1]) + This vectorized version calculates all summation terms + at the same time (since the grad of a sum is the sum of the grads). 
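+        Each term's derivative uses the identity
+        :math:`\\sigma'(x) = \\sigma(x) * (1 - \\sigma(x))`.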
+ + :returns: Nx1 vector of derivatives, unless return_precalc is true, + then it also returns the precomputed stuff + """ + d = self.d + mpsi = self.psi + + # vectorized version + # term = b * (y + c) + term = (mpsi[:, 1] * (y[:, :, None] + mpsi[:, 2])).T + logistic_term = self._logistic(term) + logistic_grad = logistic_term - (logistic_term ** 2) + + #S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T + #R = np.tanh(S) + #D = 1 - (R ** 2) + + #GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T + grad = (d + (mpsi[:, :1][:, : ,None] * mpsi[:, 1:2][:, :, None] * + logistic_grad).sum(axis=0)).T + + if return_precalc: + return grad, logistic_term, logistic_grad + return grad + + def fgrad_y_psi(self, y, psi): + """ + gradient of f w.r.t to y and psi + + :returns: NxIx4 tensor of partial derivatives + """ + mpsi = self.psi + + grad, l_term, l_grad = self.fgrad_y(y, return_precalc=True) + gradients = np.zeros((y.shape[0], y.shape[1], self.n_terms, 4)) + for i in xrange(self.n_terms): + a, b, c = mpsi[i] + gradients[:, :, i, 0] = b * l_grad[i].T + b2l_term = b - (2 * l_term[i]) + al_grad = a * l_grad[i].T + #gradients[:, :, i, 1] = a * (d[i] - 2.0 * s[i] * r[i] * (1.0/np.cosh(s[i])) ** 2).T + gradients[:, :, i, 1] = (1 + ((y + c) * b2l_term)) * al_grad + #gradients[:, :, i, 2] = (-2.0 * a * (b ** 2) * r[i] * ((1.0 / np.cosh(s[i])) ** 2)).T + gradients[:, :, i, 2] = b * b2l_term * al_grad + gradients[:, :, 0, 3] = 1.0 + + if return_covar_chain: + covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) + for i in xrange(self.n_terms): + a, b, c = mpsi[i] + covar_grad_chain[:, :, i, 0] = l_term[i].T + al_grad = a * l_grad[i].T + covar_grad_chain[:, :, i, 1] = (y + c) * al_grad + covar_grad_chain[:, :, i, 2] = b * al_grad + covar_grad_chain[:, :, 0, 3] = y + return gradients, covar_grad_chain + + return gradients + + def _get_param_names(self): + variables = ['a', 'b', 'c', 'd'] + names = sum([['warp_logistic_%s_t%i' % (variables[n],q) for n in range(3)] + for q in range(self.n_terms)],[]) + names.append('warp_logistic') + return names + + def update_grads(self, Y_untransformed, Kiy): + grad_y = self.fgrad_y(Y_untransformed) + grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed, + return_covar_chain=True) + djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) + dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) + + warping_grads = -dquad_dpsi + djac_dpsi + + self.psi.gradient[:] = warping_grads[:, :-1] + self.d.gradient[:] = warping_grads[0, -1] + From 91e625a9bdf549934dcac75b8ab4820a37a9dcdf Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 24 Feb 2016 13:27:19 +0000 Subject: [PATCH 12/28] logistic seems working but more tests are needed --- GPy/testing/model_tests.py | 4 +++- GPy/util/warping_functions.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index ab88d011..4b9d11ee 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -316,10 +316,12 @@ class MiscTests(unittest.TestCase): X = (2 * np.pi) * np.random.random(151) - np.pi Y = np.sin(X) + np.random.normal(0,0.2,151) Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) + Y = np.abs(Y) import matplotlib.pyplot as plt warp_k = GPy.kern.RBF(1) - warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) + #warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) + warp_f = GPy.util.warping_functions.LogisticFunction(n_terms=2) 
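+        # The targets above are made non-negative (np.abs), so this run
+        # exercises the newly added LogisticFunction on the same benchmark.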
warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) m = GPy.models.GPRegression(X[:, None], Y[:, None]) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 10fddb50..c62ed08e 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -338,7 +338,7 @@ class LogisticFunction(WarpingFunction): return grad, logistic_term, logistic_grad return grad - def fgrad_y_psi(self, y, psi): + def fgrad_y_psi(self, y, return_covar_chain=False): """ gradient of f w.r.t to y and psi @@ -351,7 +351,7 @@ class LogisticFunction(WarpingFunction): for i in xrange(self.n_terms): a, b, c = mpsi[i] gradients[:, :, i, 0] = b * l_grad[i].T - b2l_term = b - (2 * l_term[i]) + b2l_term = b - (2 * l_term[i].T) al_grad = a * l_grad[i].T #gradients[:, :, i, 1] = a * (d[i] - 2.0 * s[i] * r[i] * (1.0/np.cosh(s[i])) ** 2).T gradients[:, :, i, 1] = (1 + ((y + c) * b2l_term)) * al_grad From 5534c45b0a3b7cab3ea45e7801510ad73db53cb1 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 2 Mar 2016 16:54:03 +0000 Subject: [PATCH 13/28] added a rate to inverse calculation --- GPy/models/warped_gp.py | 14 ++-- GPy/testing/model_tests.py | 32 +++++++- GPy/util/warping_functions.py | 150 +++++++++++++++++++++++++++++++--- 3 files changed, 173 insertions(+), 23 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index d35c1a15..419a4104 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -68,22 +68,22 @@ class WarpedGP(GP): arg2 = np.ones(shape=gh_samples.shape).dot(mean.T) return self.warping_function.f_inv(arg1 + arg2, y=pred_init) - def _get_warped_mean(self, mean, std, pred_init=None, deg_gauss_hermite=100): + def _get_warped_mean(self, mean, std, pred_init=None, deg_gauss_hermite=20): """ Calculate the warped mean by using Gauss-Hermite quadrature. """ gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite) - gh_samples = gh_samples[:,None] - gh_weights = gh_weights[None,:] + gh_samples = gh_samples[:, None] + gh_weights = gh_weights[None, :] return gh_weights.dot(self._get_warped_term(mean, std, gh_samples)) / np.sqrt(np.pi) - def _get_warped_variance(self, mean, std, pred_init=None, deg_gauss_hermite=100): + def _get_warped_variance(self, mean, std, pred_init=None, deg_gauss_hermite=20): """ Calculate the warped variance by using Gauss-Hermite quadrature. 
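        The identity used is :math:`Var[f^{-1}(z)] = E[f^{-1}(z)^2] - E[f^{-1}(z)]^2`,
        with each expectation under :math:`N(\\mu, \\sigma^2)` approximated by
        :math:`\\frac{1}{\\sqrt{\\pi}} \\sum_i w_i h(\\sqrt{2}\\sigma x_i + \\mu)`
        over the Gauss-Hermite nodes :math:`x_i` and weights :math:`w_i`.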
""" gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite) - gh_samples = gh_samples[:,None] - gh_weights = gh_weights[None,:] + gh_samples = gh_samples[:, None] + gh_weights = gh_weights[None, :] arg1 = gh_weights.dot(self._get_warped_term(mean, std, gh_samples, pred_init=pred_init) ** 2) / np.sqrt(np.pi) arg2 = self._get_warped_mean(mean, std, pred_init=pred_init, @@ -91,7 +91,7 @@ class WarpedGP(GP): return arg1 - (arg2 ** 2) def predict(self, Xnew, which_parts='all', pred_init=None, full_cov=False, Y_metadata=None, - median=False, deg_gauss_hermite=100, likelihood=None): + median=False, deg_gauss_hermite=20, likelihood=None): """ Prediction results depend on: - The value of the self.predict_in_warped_space flag diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 4b9d11ee..2b9add02 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -307,6 +307,31 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(preds, warp_preds) + def test_warped_gp_log(self): + """ + A WarpedGP with the log warping function should be + equal to a standard GP with log labels. + """ + k = GPy.kern.RBF(1) + Y = np.abs(self.Y) + logY = np.log(Y) + m = GPy.models.GPRegression(self.X, logY, kernel=k) + #m.optimize() + m['Gaussian_noise.variance'] = 1e-4 + preds = m.predict(self.X)[0] + + warp_k = GPy.kern.RBF(1) + warp_f = GPy.util.warping_functions.LogFunction() + warp_m = GPy.models.WarpedGP(self.X, Y, kernel=warp_k, warping_function=warp_f) + warp_m.optimize() + warp_m['.*'] = 1.0 + warp_m['Gaussian_noise.variance'] = 1e-4 + warp_preds = warp_m.predict(self.X, median=True)[0] + #print np.exp(preds) + #print warp_preds + + np.testing.assert_almost_equal(np.exp(preds), warp_preds) + #@unittest.skip('Comment this to plot the modified sine function') def test_warped_gp_sine(self): """ @@ -316,12 +341,13 @@ class MiscTests(unittest.TestCase): X = (2 * np.pi) * np.random.random(151) - np.pi Y = np.sin(X) + np.random.normal(0,0.2,151) Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) - Y = np.abs(Y) + #Y = np.abs(Y) import matplotlib.pyplot as plt warp_k = GPy.kern.RBF(1) - #warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) - warp_f = GPy.util.warping_functions.LogisticFunction(n_terms=2) + warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) + #warp_f = GPy.util.warping_functions.LogisticFunction(n_terms=2) + #warp_f = GPy.util.warping_functions.LogitFunction(n_terms=1) warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) m = GPy.models.GPRegression(X[:, None], Y[:, None]) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index c62ed08e..548877e5 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -70,6 +70,7 @@ class TanhFunction(WarpingFunction): self.link_parameter(self.psi) self.link_parameter(self.d) self.initial_y = initial_y + self.rate = 0.1 def f(self, y): """ @@ -94,28 +95,19 @@ class TanhFunction(WarpingFunction): """ z = z.copy() - - if y is None: - # The idea here is to initialize y with +1 where - # z is positive and -1 where it is negative. - # For negative z, Newton-Raphson diverges - # if we initialize y with a positive value (and vice-versa). - y = ((z > 0) * 1.) 
- (z <= 0) - if self.initial_y is not None: - y *= self.initial_y + y = np.ones_like(z) it = 0 update = np.inf - - while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations): + while np.abs(update).sum() > 1e-10 and it < max_iterations: fy = self.f(y) fgrady = self.fgrad_y(y) update = (fy - z) / fgrady - y -= update + y -= self.rate * update it += 1 if it == max_iterations: print("WARNING!!! Maximum number of iterations reached in f_inv ") - print("Sum of updates: %.4f" % np.sum(update)) + print("Sum of roots: %.4f" % np.sum(fy - z)) return y def fgrad_y(self, y, return_precalc=False): @@ -391,3 +383,135 @@ class LogisticFunction(WarpingFunction): self.psi.gradient[:] = warping_grads[:, :-1] self.d.gradient[:] = warping_grads[0, -1] + +class LogitFunction(WarpingFunction): + """ + A sum of logit functions. + """ + def __init__(self, n_terms=1, initial_y=None): + """ + n_terms specifies the number of logistic terms to be used + """ + self.n_terms = n_terms + self.num_parameters = 3 * self.n_terms + self.psi = np.ones((self.n_terms, 3)) + super(LogitFunction, self).__init__(name='warp_logit') + self.psi = Param('psi', self.psi) + self.psi[:, :2].constrain_positive() + self.link_parameter(self.psi) + self.initial_y = initial_y + self.e = 1e-5 + + def _logit(self, y): + a, b, c = self.psi[0] + y += self.e + return ((np.log(y) - np.log(a - y)) / b) + c + + def f(self, y): + """ + Transform y with f using parameter vector psi + psi = [[a,b,c]] + + :math:`f = (y * d) + \\sum_{terms} a * logistic(b *(y + c))` + """ + z = np.zeros_like(y) + for i in xrange(self.n_terms): + a, b, c = self.psi[i] + z += self._logit(y) + return z + + def f_inv(self, z, max_iterations=100, y=None): + """ + calculate the numerical inverse of f + + :param max_iterations: maximum number of N.R. iterations + """ + z = z.copy() + if y is None: + # The idea here is to initialize y with +1 where + # z is positive and -1 where it is negative. + # For negative z, Newton-Raphson diverges + # if we initialize y with a positive value (and vice-versa). + y = ((z > 0) * 1.) - (z <= 0) + if self.initial_y is not None: + y *= self.initial_y + it = 0 + update = np.inf + while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations): + fy = self.f(y) + fgrady = self.fgrad_y(y) + update = (fy - z) / fgrady + y -= update + it += 1 + if it == max_iterations: + print("WARNING!!! Maximum number of iterations reached in f_inv ") + print("Sum of updates: %.4f" % np.sum(update)) + return y + + def fgrad_y(self, y, return_precalc=False): + """ + Gradient of f w.r.t to y ([N x 1]) + This vectorized version calculates all summation terms + at the same time (since the grad of a sum is the sum of the grads). + + :returns: Nx1 vector of derivatives, unless return_precalc is true, + then it also returns the precomputed stuff + """ + a, b, c = self.psi[0] + + # vectorized version + # term = b * (y + c) + y += self.e + yb = y * b + ab = a * b + grad = (1. / yb) + (1. / (ab - yb)) + + if return_precalc: + return grad, yb, ab + return grad + + def fgrad_y_psi(self, y, return_covar_chain=False): + """ + gradient of f w.r.t to y and psi + + :returns: NxIx4 tensor of partial derivatives + """ + grad, yb, ab = self.fgrad_y(y, return_precalc=True) + gradients = np.zeros((y.shape[0], y.shape[1], self.n_terms, 3)) + for i in xrange(self.n_terms): + a, b, c = self.psi[i] + gradients[:, :, i, 0] = -b / ((ab - yb) ** 2) + yb2 = y * (b ** 2) + ab2 = a * (b ** 2) + gradients[:, :, i, 1] = - (1. / yb2) - (1. 
/ (ab2 - yb2)) + #gradients[:, :, i, 2] = 0.0 + + if return_covar_chain: + covar_grad_chain = np.zeros((y.shape[0], y.shape[1], self.n_terms, 3)) + for i in xrange(self.n_terms): + a, b, c = self.psi[i] + covar_grad_chain[:, :, i, 0] = - (1. / (ab - yb)) + covar_grad_chain[:, :, i, 1] = - (np.log(y) - np.log(a - y)) / (b ** 2) + covar_grad_chain[:, :, i, 2] = 1.0 + return gradients, covar_grad_chain + + return gradients + + def _get_param_names(self): + variables = ['a', 'b', 'c'] + names = sum([['warp_logit_%s_t%i' % (variables[n],q) for n in range(3)] + for q in range(self.n_terms)],[]) + names.append('warp_logit') + return names + + def update_grads(self, Y_untransformed, Kiy): + grad_y = self.fgrad_y(Y_untransformed) + grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed, + return_covar_chain=True) + djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) + dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) + + warping_grads = -dquad_dpsi + djac_dpsi + + self.psi.gradient[:] = warping_grads[:, :] + From 7bee3daac83ef91001cfc007959ee787e0b961c7 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 2 Mar 2016 16:55:26 +0000 Subject: [PATCH 14/28] cleaning --- GPy/testing/model_tests.py | 5 +- GPy/util/warping_functions.py | 281 ---------------------------------- 2 files changed, 1 insertion(+), 285 deletions(-) diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 2b9add02..83487373 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -332,7 +332,7 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(np.exp(preds), warp_preds) - #@unittest.skip('Comment this to plot the modified sine function') + @unittest.skip('Comment this to plot the modified sine function') def test_warped_gp_sine(self): """ A test replicating the sine regression problem from @@ -341,13 +341,10 @@ class MiscTests(unittest.TestCase): X = (2 * np.pi) * np.random.random(151) - np.pi Y = np.sin(X) + np.random.normal(0,0.2,151) Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) - #Y = np.abs(Y) import matplotlib.pyplot as plt warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) - #warp_f = GPy.util.warping_functions.LogisticFunction(n_terms=2) - #warp_f = GPy.util.warping_functions.LogitFunction(n_terms=1) warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) m = GPy.models.GPRegression(X[:, None], Y[:, None]) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 548877e5..f98f0f08 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -234,284 +234,3 @@ class IdentityFunction(WarpingFunction): def f_inv(self, z, y=None): return z - -class LogisticFunction(WarpingFunction): - """ - A sum of logistic functions. - """ - def __init__(self, n_terms=3, initial_y=None): - """ - n_terms specifies the number of logistic terms to be used - """ - self.n_terms = n_terms - self.num_parameters = 3 * self.n_terms + 1 - self.psi = np.ones((self.n_terms, 3)) - super(LogisticFunction, self).__init__(name='warp_logistic') - self.psi = Param('psi', self.psi) - self.psi[:, :2].constrain_positive() - self.d = Param('%s' % ('d'), 1.0, Logexp()) - self.link_parameter(self.psi) - self.link_parameter(self.d) - self.initial_y = initial_y - - def _logistic(self, x): - return 1. 
/ (1 + np.exp(-x)) - - def f(self, y): - """ - Transform y with f using parameter vector psi - psi = [[a,b,c]] - - :math:`f = (y * d) + \\sum_{terms} a * logistic(b *(y + c))` - """ - d = self.d - mpsi = self.psi - z = d * y.copy() - for i in xrange(self.n_terms): - a, b, c = mpsi[i] - z += a * self._logistic(b * (y + c)) - return z - - def f_inv(self, z, max_iterations=100, y=None): - """ - calculate the numerical inverse of f - - :param max_iterations: maximum number of N.R. iterations - """ - z = z.copy() - if y is None: - # The idea here is to initialize y with +1 where - # z is positive and -1 where it is negative. - # For negative z, Newton-Raphson diverges - # if we initialize y with a positive value (and vice-versa). - y = ((z > 0) * 1.) - (z <= 0) - if self.initial_y is not None: - y *= self.initial_y - it = 0 - update = np.inf - while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations): - fy = self.f(y) - fgrady = self.fgrad_y(y) - update = (fy - z) / fgrady - y -= update - it += 1 - if it == max_iterations: - print("WARNING!!! Maximum number of iterations reached in f_inv ") - print("Sum of updates: %.4f" % np.sum(update)) - return y - - def fgrad_y(self, y, return_precalc=False): - """ - Gradient of f w.r.t to y ([N x 1]) - This vectorized version calculates all summation terms - at the same time (since the grad of a sum is the sum of the grads). - - :returns: Nx1 vector of derivatives, unless return_precalc is true, - then it also returns the precomputed stuff - """ - d = self.d - mpsi = self.psi - - # vectorized version - # term = b * (y + c) - term = (mpsi[:, 1] * (y[:, :, None] + mpsi[:, 2])).T - logistic_term = self._logistic(term) - logistic_grad = logistic_term - (logistic_term ** 2) - - #S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T - #R = np.tanh(S) - #D = 1 - (R ** 2) - - #GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T - grad = (d + (mpsi[:, :1][:, : ,None] * mpsi[:, 1:2][:, :, None] * - logistic_grad).sum(axis=0)).T - - if return_precalc: - return grad, logistic_term, logistic_grad - return grad - - def fgrad_y_psi(self, y, return_covar_chain=False): - """ - gradient of f w.r.t to y and psi - - :returns: NxIx4 tensor of partial derivatives - """ - mpsi = self.psi - - grad, l_term, l_grad = self.fgrad_y(y, return_precalc=True) - gradients = np.zeros((y.shape[0], y.shape[1], self.n_terms, 4)) - for i in xrange(self.n_terms): - a, b, c = mpsi[i] - gradients[:, :, i, 0] = b * l_grad[i].T - b2l_term = b - (2 * l_term[i].T) - al_grad = a * l_grad[i].T - #gradients[:, :, i, 1] = a * (d[i] - 2.0 * s[i] * r[i] * (1.0/np.cosh(s[i])) ** 2).T - gradients[:, :, i, 1] = (1 + ((y + c) * b2l_term)) * al_grad - #gradients[:, :, i, 2] = (-2.0 * a * (b ** 2) * r[i] * ((1.0 / np.cosh(s[i])) ** 2)).T - gradients[:, :, i, 2] = b * b2l_term * al_grad - gradients[:, :, 0, 3] = 1.0 - - if return_covar_chain: - covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) - for i in xrange(self.n_terms): - a, b, c = mpsi[i] - covar_grad_chain[:, :, i, 0] = l_term[i].T - al_grad = a * l_grad[i].T - covar_grad_chain[:, :, i, 1] = (y + c) * al_grad - covar_grad_chain[:, :, i, 2] = b * al_grad - covar_grad_chain[:, :, 0, 3] = y - return gradients, covar_grad_chain - - return gradients - - def _get_param_names(self): - variables = ['a', 'b', 'c', 'd'] - names = sum([['warp_logistic_%s_t%i' % (variables[n],q) for n in range(3)] - for q in range(self.n_terms)],[]) - names.append('warp_logistic') - return names - - def update_grads(self, 
Y_untransformed, Kiy): - grad_y = self.fgrad_y(Y_untransformed) - grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed, - return_covar_chain=True) - djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) - dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) - - warping_grads = -dquad_dpsi + djac_dpsi - - self.psi.gradient[:] = warping_grads[:, :-1] - self.d.gradient[:] = warping_grads[0, -1] - - -class LogitFunction(WarpingFunction): - """ - A sum of logit functions. - """ - def __init__(self, n_terms=1, initial_y=None): - """ - n_terms specifies the number of logistic terms to be used - """ - self.n_terms = n_terms - self.num_parameters = 3 * self.n_terms - self.psi = np.ones((self.n_terms, 3)) - super(LogitFunction, self).__init__(name='warp_logit') - self.psi = Param('psi', self.psi) - self.psi[:, :2].constrain_positive() - self.link_parameter(self.psi) - self.initial_y = initial_y - self.e = 1e-5 - - def _logit(self, y): - a, b, c = self.psi[0] - y += self.e - return ((np.log(y) - np.log(a - y)) / b) + c - - def f(self, y): - """ - Transform y with f using parameter vector psi - psi = [[a,b,c]] - - :math:`f = (y * d) + \\sum_{terms} a * logistic(b *(y + c))` - """ - z = np.zeros_like(y) - for i in xrange(self.n_terms): - a, b, c = self.psi[i] - z += self._logit(y) - return z - - def f_inv(self, z, max_iterations=100, y=None): - """ - calculate the numerical inverse of f - - :param max_iterations: maximum number of N.R. iterations - """ - z = z.copy() - if y is None: - # The idea here is to initialize y with +1 where - # z is positive and -1 where it is negative. - # For negative z, Newton-Raphson diverges - # if we initialize y with a positive value (and vice-versa). - y = ((z > 0) * 1.) - (z <= 0) - if self.initial_y is not None: - y *= self.initial_y - it = 0 - update = np.inf - while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations): - fy = self.f(y) - fgrady = self.fgrad_y(y) - update = (fy - z) / fgrady - y -= update - it += 1 - if it == max_iterations: - print("WARNING!!! Maximum number of iterations reached in f_inv ") - print("Sum of updates: %.4f" % np.sum(update)) - return y - - def fgrad_y(self, y, return_precalc=False): - """ - Gradient of f w.r.t to y ([N x 1]) - This vectorized version calculates all summation terms - at the same time (since the grad of a sum is the sum of the grads). - - :returns: Nx1 vector of derivatives, unless return_precalc is true, - then it also returns the precomputed stuff - """ - a, b, c = self.psi[0] - - # vectorized version - # term = b * (y + c) - y += self.e - yb = y * b - ab = a * b - grad = (1. / yb) + (1. / (ab - yb)) - - if return_precalc: - return grad, yb, ab - return grad - - def fgrad_y_psi(self, y, return_covar_chain=False): - """ - gradient of f w.r.t to y and psi - - :returns: NxIx4 tensor of partial derivatives - """ - grad, yb, ab = self.fgrad_y(y, return_precalc=True) - gradients = np.zeros((y.shape[0], y.shape[1], self.n_terms, 3)) - for i in xrange(self.n_terms): - a, b, c = self.psi[i] - gradients[:, :, i, 0] = -b / ((ab - yb) ** 2) - yb2 = y * (b ** 2) - ab2 = a * (b ** 2) - gradients[:, :, i, 1] = - (1. / yb2) - (1. / (ab2 - yb2)) - #gradients[:, :, i, 2] = 0.0 - - if return_covar_chain: - covar_grad_chain = np.zeros((y.shape[0], y.shape[1], self.n_terms, 3)) - for i in xrange(self.n_terms): - a, b, c = self.psi[i] - covar_grad_chain[:, :, i, 0] = - (1. 
/ (ab - yb)) - covar_grad_chain[:, :, i, 1] = - (np.log(y) - np.log(a - y)) / (b ** 2) - covar_grad_chain[:, :, i, 2] = 1.0 - return gradients, covar_grad_chain - - return gradients - - def _get_param_names(self): - variables = ['a', 'b', 'c'] - names = sum([['warp_logit_%s_t%i' % (variables[n],q) for n in range(3)] - for q in range(self.n_terms)],[]) - names.append('warp_logit') - return names - - def update_grads(self, Y_untransformed, Kiy): - grad_y = self.fgrad_y(Y_untransformed) - grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed, - return_covar_chain=True) - djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) - dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) - - warping_grads = -dquad_dpsi + djac_dpsi - - self.psi.gradient[:] = warping_grads[:, :] - From 6bea908234058b3fe57fd1dc61fa16e19ce74d1a Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Wed, 2 Mar 2016 17:05:22 +0000 Subject: [PATCH 15/28] refactored the numeric inverse into the mother class, to test Identity and Log --- GPy/testing/model_tests.py | 21 +++-------- GPy/util/warping_functions.py | 70 +++++++++++++++++++---------------- 2 files changed, 43 insertions(+), 48 deletions(-) diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 83487373..351aa210 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -300,12 +300,12 @@ class MiscTests(unittest.TestCase): preds = m.predict(self.X) warp_k = GPy.kern.RBF(1) - warp_f = GPy.util.warping_functions.IdentityFunction() + warp_f = GPy.util.warping_functions.IdentityFunction(closed_inverse=False) warp_m = GPy.models.WarpedGP(self.X, self.Y, kernel=warp_k, warping_function=warp_f) warp_m.optimize() warp_preds = warp_m.predict(self.X) - np.testing.assert_almost_equal(preds, warp_preds) + np.testing.assert_almost_equal(preds, warp_preds, decimal=4) def test_warped_gp_log(self): """ @@ -316,21 +316,16 @@ class MiscTests(unittest.TestCase): Y = np.abs(self.Y) logY = np.log(Y) m = GPy.models.GPRegression(self.X, logY, kernel=k) - #m.optimize() - m['Gaussian_noise.variance'] = 1e-4 + m.optimize() preds = m.predict(self.X)[0] warp_k = GPy.kern.RBF(1) - warp_f = GPy.util.warping_functions.LogFunction() + warp_f = GPy.util.warping_functions.LogFunction(closed_inverse=False) warp_m = GPy.models.WarpedGP(self.X, Y, kernel=warp_k, warping_function=warp_f) warp_m.optimize() - warp_m['.*'] = 1.0 - warp_m['Gaussian_noise.variance'] = 1e-4 warp_preds = warp_m.predict(self.X, median=True)[0] - #print np.exp(preds) - #print warp_preds - np.testing.assert_almost_equal(np.exp(preds), warp_preds) + np.testing.assert_almost_equal(np.exp(preds), warp_preds, decimal=4) @unittest.skip('Comment this to plot the modified sine function') def test_warped_gp_sine(self): @@ -354,17 +349,11 @@ class MiscTests(unittest.TestCase): print(warp_m['.*warp.*']) warp_m.predict_in_warped_space = False warp_m.plot() - import ipdb; ipdb.set_trace() warp_m.predict_in_warped_space = True warp_m.plot() m.plot() warp_f.plot(X.min()-10, X.max()+10) - - - - plt.show() - class GradientTests(np.testing.TestCase): diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index f98f0f08..13cda810 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -14,6 +14,7 @@ class WarpingFunction(Parameterized): def __init__(self, name): super(WarpingFunction, self).__init__(name=name) + self.rate = 0.1 def f(self, y, psi): """function transformation @@ -29,9 +30,30 @@ class 
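With the inverse refactored into the mother class, every warping function inherits the same damped Newton-Raphson loop, and only subclasses with an analytic inverse opt out. A standalone rendering of that loop, with plain callables standing in for the bound methods (illustrative; the warning printout is omitted):

import numpy as np

def numeric_f_inv(f, fgrad_y, z, rate=0.1, max_iterations=100):
    y = np.ones_like(z)
    it, update = 0, np.inf
    while np.abs(update).sum() > 1e-10 and it < max_iterations:
        update = (f(y) - z) / fgrad_y(y)   # Newton-Raphson step
        y -= rate * update                 # damped to avoid overshooting
        it += 1
    return y

# e.g. numerically inverting the identity, as the test above does:
z = np.linspace(-1., 1., 5)[:, None]
y = numeric_f_inv(lambda y: y, lambda y: np.ones_like(y), z)

The damping makes convergence geometric rather than quadratic, which is why the tests above compare the closed-form and numeric paths at decimal=4 instead of demanding exact agreement.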
WarpingFunction(Parameterized): """gradient of f w.r.t to y""" raise NotImplementedError - def f_inv(self, z, psi): - """inverse function transformation""" - raise NotImplementedError + def f_inv(self, z, max_iterations=100, y=None): + """ + Calculate the numerical inverse of f. This should be + overwritten for specific warping functions where the + inverse can be found in closed form. + + :param max_iterations: maximum number of N.R. iterations + """ + + z = z.copy() + y = np.ones_like(z) + + it = 0 + update = np.inf + while np.abs(update).sum() > 1e-10 and it < max_iterations: + fy = self.f(y) + fgrady = self.fgrad_y(y) + update = (fy - z) / fgrady + y -= self.rate * update + it += 1 + if it == max_iterations: + print("WARNING!!! Maximum number of iterations reached in f_inv ") + print("Sum of roots: %.4f" % np.sum(fy - z)) + return y def _get_param_names(self): raise NotImplementedError @@ -70,7 +92,6 @@ class TanhFunction(WarpingFunction): self.link_parameter(self.psi) self.link_parameter(self.d) self.initial_y = initial_y - self.rate = 0.1 def f(self, y): """ @@ -87,29 +108,6 @@ class TanhFunction(WarpingFunction): z += a * np.tanh(b * (y + c)) return z - def f_inv(self, z, max_iterations=100, y=None): - """ - calculate the numerical inverse of f - - :param max_iterations: maximum number of N.R. iterations - """ - - z = z.copy() - y = np.ones_like(z) - - it = 0 - update = np.inf - while np.abs(update).sum() > 1e-10 and it < max_iterations: - fy = self.f(y) - fgrady = self.fgrad_y(y) - update = (fy - z) / fgrady - y -= self.rate * update - it += 1 - if it == max_iterations: - print("WARNING!!! Maximum number of iterations reached in f_inv ") - print("Sum of roots: %.4f" % np.sum(fy - z)) - return y - def fgrad_y(self, y, return_precalc=False): """ gradient of f w.r.t to y ([N x 1]) @@ -183,12 +181,16 @@ class TanhFunction(WarpingFunction): class LogFunction(WarpingFunction): """ - Easy wrapper for applying a fixed warping function to + Easy wrapper for applying a fixed log warping function to positive-only values. + The closed_inverse flag should only be set to False for + debugging and testing purposes. """ - def __init__(self): + def __init__(self, closed_inverse=True): self.num_parameters = 0 super(LogFunction, self).__init__(name='log') + if closed_inverse: + self.f_inv = self._f_inv def f(self, y): return np.log(y) @@ -204,7 +206,7 @@ class LogFunction(WarpingFunction): return 0, 0 return 0 - def f_inv(self, z, y=None): + def _f_inv(self, z, y=None): return np.exp(z) @@ -212,10 +214,14 @@ class IdentityFunction(WarpingFunction): """ Identity warping function. This is for testing and sanity check purposes and should not be used in practice. + The closed_inverse flag should only be set to False for + debugging and testing purposes. 
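The closed_inverse flag works by shadowing the inherited numeric f_inv with the analytic _f_inv at construction time. A toy subclass following the same convention (CubeFunction is our illustration, not part of GPy):

import numpy as np
from GPy.util.warping_functions import WarpingFunction

class CubeFunction(WarpingFunction):
    """f(y) = y**3 is monotone, so its inverse exists in closed form."""
    def __init__(self, closed_inverse=True):
        self.num_parameters = 0
        super(CubeFunction, self).__init__(name='cube')
        if closed_inverse:
            self.f_inv = self._f_inv   # shadow the numeric fallback

    def f(self, y):
        return y ** 3

    def fgrad_y(self, y):
        return 3. * (y ** 2)           # used by the numeric f_inv

    def _f_inv(self, z, y=None):
        return np.cbrt(z)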
""" - def __init__(self): + def __init__(self, closed_inverse=True): self.num_parameters = 0 super(IdentityFunction, self).__init__(name='identity') + if closed_inverse: + self.f_inv = self._f_inv def f(self, y): return y @@ -231,6 +237,6 @@ class IdentityFunction(WarpingFunction): return 0, 0 return 0 - def f_inv(self, z, y=None): + def _f_inv(self, z, y=None): return z From 2ecd13b08d93448167fc66e5cd3cfff58916083d Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Mon, 7 Mar 2016 10:46:42 +0000 Subject: [PATCH 16/28] moved cubic sine from tests to examples --- GPy/examples/regression.py | 26 ++++++++++++++++++++++++++ GPy/testing/model_tests.py | 29 +---------------------------- 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 11734564..b6bc07a0 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -550,3 +550,29 @@ def parametric_mean_function(max_iters=100, optimize=True, plot=True): return m +def warped_gp_cubic_sine(max_iters=100): + """ + A test replicating the sine regression problem from + Snelson's paper. + """ + X = (2 * np.pi) * np.random.random(151) - np.pi + Y = np.sin(X) + np.random.normal(0,0.2,151) + Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) + + warp_k = GPy.kern.RBF(1) + warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) + warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) + + m = GPy.models.GPRegression(X[:, None], Y[:, None]) + m.optimize_restarts(parallel=False, robust=True, num_restarts=5, max_iters=max_iters) + warp_m.optimize_restarts(parallel=False, robust=True, num_restarts=5, max_iters=max_iters) + print(warp_m) + print(warp_m['.*warp.*']) + warp_m.predict_in_warped_space = False + warp_m.plot(title="Warped GP - Latent space") + warp_m.predict_in_warped_space = True + warp_m.plot(title="Warped GP - Warped space") + m.plot(title="Standard GP") + #warp_f.plot(X.min()-10, X.max()+10) + warp_m.plot_warping() + pb.show() diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 351aa210..5ec64e15 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -311,6 +311,7 @@ class MiscTests(unittest.TestCase): """ A WarpedGP with the log warping function should be equal to a standard GP with log labels. + Note that we predict the median here. """ k = GPy.kern.RBF(1) Y = np.abs(self.Y) @@ -327,34 +328,6 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(np.exp(preds), warp_preds, decimal=4) - @unittest.skip('Comment this to plot the modified sine function') - def test_warped_gp_sine(self): - """ - A test replicating the sine regression problem from - Snelson's paper. 
- """ - X = (2 * np.pi) * np.random.random(151) - np.pi - Y = np.sin(X) + np.random.normal(0,0.2,151) - Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) - - import matplotlib.pyplot as plt - warp_k = GPy.kern.RBF(1) - warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) - warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) - - m = GPy.models.GPRegression(X[:, None], Y[:, None]) - m.optimize_restarts(parallel=False, robust=True, num_restarts=5) - warp_m.optimize_restarts(parallel=False, robust=True, num_restarts=5) - print(warp_m) - print(warp_m['.*warp.*']) - warp_m.predict_in_warped_space = False - warp_m.plot() - warp_m.predict_in_warped_space = True - warp_m.plot() - m.plot() - warp_f.plot(X.min()-10, X.max()+10) - plt.show() - class GradientTests(np.testing.TestCase): def setUp(self): From 419f2fb284c987cfa19cf3306c6578143430b6f4 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Mon, 7 Mar 2016 10:53:17 +0000 Subject: [PATCH 17/28] cleaning --- GPy/examples/regression.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index b6bc07a0..261d5a49 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -552,27 +552,28 @@ def parametric_mean_function(max_iters=100, optimize=True, plot=True): def warped_gp_cubic_sine(max_iters=100): """ - A test replicating the sine regression problem from + A test replicating the cubic sine regression problem from Snelson's paper. """ X = (2 * np.pi) * np.random.random(151) - np.pi Y = np.sin(X) + np.random.normal(0,0.2,151) Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) - + X = X[:, None] + Y = Y[:, None] + warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) - warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) - - m = GPy.models.GPRegression(X[:, None], Y[:, None]) + warp_m = GPy.models.WarpedGP(X, Y, kernel=warp_k, warping_function=warp_f) + m = GPy.models.GPRegression(X, Y) m.optimize_restarts(parallel=False, robust=True, num_restarts=5, max_iters=max_iters) warp_m.optimize_restarts(parallel=False, robust=True, num_restarts=5, max_iters=max_iters) print(warp_m) print(warp_m['.*warp.*']) + warp_m.predict_in_warped_space = False warp_m.plot(title="Warped GP - Latent space") warp_m.predict_in_warped_space = True warp_m.plot(title="Warped GP - Warped space") m.plot(title="Standard GP") - #warp_f.plot(X.min()-10, X.max()+10) warp_m.plot_warping() pb.show() From 0d943ac98ec3b4a68e69a9f73acf5370b2445e7b Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Mon, 14 Mar 2016 17:22:23 +0000 Subject: [PATCH 18/28] added a log-tanh function --- GPy/models/warped_gp.py | 5 ++ GPy/util/warping_functions.py | 106 +++++++++++++++++++++++++++++++++- 2 files changed, 110 insertions(+), 1 deletion(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 419a4104..2e93ddd5 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -5,6 +5,7 @@ import numpy as np from ..util.warping_functions import * from ..core import GP from .. 
import likelihoods +from paramz import ObsAr from GPy.util.warping_functions import TanhFunction from GPy import kern @@ -31,6 +32,10 @@ class WarpedGP(GP): GP.__init__(self, X, self.transform_data(), likelihood=likelihood, kernel=kernel) self.link_parameter(self.warping_function) + def set_XY(self, X=None, Y=None): + self.Y_untransformed = Y.copy() + GP.set_XY(self, X, self.transform_data()) + def _scale_data(self, Y): self._Ymax = Y.max() self._Ymin = Y.min() diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 13cda810..ab7e2547 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -59,7 +59,6 @@ class WarpingFunction(Parameterized): raise NotImplementedError def plot(self, xmin, xmax): - psi = self.psi y = np.arange(xmin, xmax, 0.01) f_y = self.f(y) from matplotlib import pyplot as plt @@ -179,6 +178,111 @@ class TanhFunction(WarpingFunction): self.d.gradient[:] = warping_grads[0, -1] +class LogTanhFunction(WarpingFunction): + """ + Trying to get the best of both worlds... + """ + def __init__(self, n_terms=3, initial_y=None): + """ + n_terms specifies the number of tanh terms to be used + """ + self.n_terms = n_terms + self.num_parameters = 3 * self.n_terms + 1 + self.psi = np.ones((self.n_terms, 3)) + super(LogTanhFunction, self).__init__(name='warp_tanh') + self.psi = Param('psi', self.psi) + self.psi[:, :2].constrain_positive() + self.d = Param('%s' % ('d'), 1.0, Logexp()) + self.link_parameter(self.psi) + self.link_parameter(self.d) + self.initial_y = initial_y + + def f(self, y): + """ + Transform y with f using parameter vector psi + psi = [[a,b,c]] + + :math:`f = (y * d) + \\sum_{terms} a * tanh(b *(y + c))` + """ + d = self.d + mpsi = self.psi + z = d * np.log(y) + for i in range(len(mpsi)): + a, b, c = mpsi[i] + z += a * np.tanh(b * (np.log(y) + c)) + return z + + def fgrad_y(self, y, return_precalc=False): + """ + gradient of f w.r.t to y ([N x 1]) + + :returns: Nx1 vector of derivatives, unless return_precalc is true, + then it also returns the precomputed stuff + """ + d = self.d + mpsi = self.psi + + # vectorized version + S = (mpsi[:,1] * (np.log(y)[:,:,None] + mpsi[:,2])).T + R = np.tanh(S) + D = 1 - (R ** 2) + + GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T / y + + if return_precalc: + return GRAD, S, R, D + + return GRAD + + def fgrad_y_psi(self, y, return_covar_chain=False): + """ + gradient of f w.r.t to y and psi + + :returns: NxIx4 tensor of partial derivatives + """ + mpsi = self.psi + + w, s, r, d = self.fgrad_y(y, return_precalc=True) + gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) + for i in range(len(mpsi)): + a,b,c = mpsi[i] + gradients[:, :, i, 0] = (b * (1.0/np.cosh(s[i])) ** 2).T / y + gradients[:, :, i, 1] = a * (d[i] - 2.0 * s[i] * r[i] * (1.0/np.cosh(s[i])) ** 2).T / y + gradients[:, :, i, 2] = (-2.0 * a * (b ** 2) * r[i] * ((1.0 / np.cosh(s[i])) ** 2)).T / y + gradients[:, :, 0, 3] = 1.0 / y + + if return_covar_chain: + covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) + for i in range(len(mpsi)): + a,b,c = mpsi[i] + covar_grad_chain[:, :, i, 0] = (r[i]).T + covar_grad_chain[:, :, i, 1] = (a * (np.log(y) + c) * ((1.0 / np.cosh(s[i])) ** 2).T) + covar_grad_chain[:, :, i, 2] = a * b * ((1.0 / np.cosh(s[i])) ** 2).T + covar_grad_chain[:, :, 0, 3] = np.log(y) + return gradients, covar_grad_chain + + return gradients + + def _get_param_names(self): + variables = ['a', 'b', 'c', 'd'] + names = sum([['warp_tanh_%s_t%i' % (variables[n],q) 
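LogTanhFunction is the tanh warp composed with a log: f(y) = d * log(y) + sum_i a_i * tanh(b_i * (log(y) + c_i)), defined for strictly positive y. Every y-derivative therefore picks up a 1/y chain-rule factor, which is where the trailing "/ y" terms above come from. A plain-NumPy rendering of f and fgrad_y (illustrative, mirroring the class):

import numpy as np

def log_tanh_f(y, psi, d):
    z = d * np.log(y)
    for a, b, c in psi:
        z += a * np.tanh(b * (np.log(y) + c))
    return z

def log_tanh_fgrad_y(y, psi, d):
    g = d * np.ones_like(y)
    for a, b, c in psi:
        g += a * b * (1. - np.tanh(b * (np.log(y) + c)) ** 2)  # sech^2 term
    return g / y   # chain rule through log(y)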
for n in range(3)] + for q in range(self.n_terms)],[]) + names.append('warp_tanh') + return names + + def update_grads(self, Y_untransformed, Kiy): + grad_y = self.fgrad_y(Y_untransformed) + grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed, + return_covar_chain=True) + djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) + dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) + + warping_grads = -dquad_dpsi + djac_dpsi + + self.psi.gradient[:] = warping_grads[:, :-1] + self.d.gradient[:] = warping_grads[0, -1] + + class LogFunction(WarpingFunction): """ Easy wrapper for applying a fixed log warping function to From 347bcc963a68242385f1e99bd6668093fbebaf5d Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Mon, 14 Mar 2016 17:40:43 +0000 Subject: [PATCH 19/28] added an exception when you input 0 or negative values to logtanh function --- GPy/util/warping_functions.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index ab7e2547..c4e8f04c 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -4,6 +4,7 @@ import numpy as np from ..core.parameterization import Parameterized, Param from paramz.transformations import Logexp +import sys class WarpingFunction(Parameterized): @@ -45,7 +46,11 @@ class WarpingFunction(Parameterized): it = 0 update = np.inf while np.abs(update).sum() > 1e-10 and it < max_iterations: - fy = self.f(y) + try: + fy = self.f(y) + except ValueError: + sys.stderr.write("One of y's is negative! Stopping inverse calculation.\n") + break fgrady = self.fgrad_y(y) update = (fy - z) / fgrady y -= self.rate * update @@ -204,6 +209,9 @@ class LogTanhFunction(WarpingFunction): :math:`f = (y * d) + \\sum_{terms} a * tanh(b *(y + c))` """ + if (y <= 0.0).any(): + sys.stderr.write("Negative y found!, raising exception.\n") + raise ValueError d = self.d mpsi = self.psi z = d * np.log(y) From 0b32bdf9a67eeb1c8650b63638cd08ab4bb12fc9 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Thu, 7 Apr 2016 17:32:35 +0100 Subject: [PATCH 20/28] removed logtanh, put into a new branch --- GPy/util/warping_functions.py | 108 ---------------------------------- 1 file changed, 108 deletions(-) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index c4e8f04c..7bc0050f 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -183,114 +183,6 @@ class TanhFunction(WarpingFunction): self.d.gradient[:] = warping_grads[0, -1] -class LogTanhFunction(WarpingFunction): - """ - Trying to get the best of both worlds... 
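The guard exists because np.log never raises on bad input: it quietly returns nan (negative y) or -inf (zero), so without the check Newton-Raphson would iterate on NaNs. The same idea as a standalone helper (illustrative):

import numpy as np

def safe_log(y):
    if (y <= 0.0).any():
        raise ValueError("log-tanh warping requires strictly positive y")
    return np.log(y)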
- """ - def __init__(self, n_terms=3, initial_y=None): - """ - n_terms specifies the number of tanh terms to be used - """ - self.n_terms = n_terms - self.num_parameters = 3 * self.n_terms + 1 - self.psi = np.ones((self.n_terms, 3)) - super(LogTanhFunction, self).__init__(name='warp_tanh') - self.psi = Param('psi', self.psi) - self.psi[:, :2].constrain_positive() - self.d = Param('%s' % ('d'), 1.0, Logexp()) - self.link_parameter(self.psi) - self.link_parameter(self.d) - self.initial_y = initial_y - - def f(self, y): - """ - Transform y with f using parameter vector psi - psi = [[a,b,c]] - - :math:`f = (y * d) + \\sum_{terms} a * tanh(b *(y + c))` - """ - if (y <= 0.0).any(): - sys.stderr.write("Negative y found!, raising exception.\n") - raise ValueError - d = self.d - mpsi = self.psi - z = d * np.log(y) - for i in range(len(mpsi)): - a, b, c = mpsi[i] - z += a * np.tanh(b * (np.log(y) + c)) - return z - - def fgrad_y(self, y, return_precalc=False): - """ - gradient of f w.r.t to y ([N x 1]) - - :returns: Nx1 vector of derivatives, unless return_precalc is true, - then it also returns the precomputed stuff - """ - d = self.d - mpsi = self.psi - - # vectorized version - S = (mpsi[:,1] * (np.log(y)[:,:,None] + mpsi[:,2])).T - R = np.tanh(S) - D = 1 - (R ** 2) - - GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T / y - - if return_precalc: - return GRAD, S, R, D - - return GRAD - - def fgrad_y_psi(self, y, return_covar_chain=False): - """ - gradient of f w.r.t to y and psi - - :returns: NxIx4 tensor of partial derivatives - """ - mpsi = self.psi - - w, s, r, d = self.fgrad_y(y, return_precalc=True) - gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) - for i in range(len(mpsi)): - a,b,c = mpsi[i] - gradients[:, :, i, 0] = (b * (1.0/np.cosh(s[i])) ** 2).T / y - gradients[:, :, i, 1] = a * (d[i] - 2.0 * s[i] * r[i] * (1.0/np.cosh(s[i])) ** 2).T / y - gradients[:, :, i, 2] = (-2.0 * a * (b ** 2) * r[i] * ((1.0 / np.cosh(s[i])) ** 2)).T / y - gradients[:, :, 0, 3] = 1.0 / y - - if return_covar_chain: - covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) - for i in range(len(mpsi)): - a,b,c = mpsi[i] - covar_grad_chain[:, :, i, 0] = (r[i]).T - covar_grad_chain[:, :, i, 1] = (a * (np.log(y) + c) * ((1.0 / np.cosh(s[i])) ** 2).T) - covar_grad_chain[:, :, i, 2] = a * b * ((1.0 / np.cosh(s[i])) ** 2).T - covar_grad_chain[:, :, 0, 3] = np.log(y) - return gradients, covar_grad_chain - - return gradients - - def _get_param_names(self): - variables = ['a', 'b', 'c', 'd'] - names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] - for q in range(self.n_terms)],[]) - names.append('warp_tanh') - return names - - def update_grads(self, Y_untransformed, Kiy): - grad_y = self.fgrad_y(Y_untransformed) - grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed, - return_covar_chain=True) - djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) - dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) - - warping_grads = -dquad_dpsi + djac_dpsi - - self.psi.gradient[:] = warping_grads[:, :-1] - self.d.gradient[:] = warping_grads[0, -1] - - class LogFunction(WarpingFunction): """ Easy wrapper for applying a fixed log warping function to From 3e27748b429448ab429b6279749d80a85da9f9ca Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Thu, 7 Apr 2016 17:52:59 +0100 Subject: [PATCH 21/28] replicated the cubic sine example into warped_gp tests for code coverage --- GPy/examples/regression.py | 4 ++++ 
GPy/testing/model_tests.py | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 261d5a49..19919f97 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -564,9 +564,13 @@ def warped_gp_cubic_sine(max_iters=100): warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) warp_m = GPy.models.WarpedGP(X, Y, kernel=warp_k, warping_function=warp_f) + warp_m['.*\.d'].constrain_fixed(1.0) m = GPy.models.GPRegression(X, Y) m.optimize_restarts(parallel=False, robust=True, num_restarts=5, max_iters=max_iters) warp_m.optimize_restarts(parallel=False, robust=True, num_restarts=5, max_iters=max_iters) + #m.optimize(max_iters=max_iters) + #warp_m.optimize(max_iters=max_iters) + print(warp_m) print(warp_m['.*warp.*']) diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 5ec64e15..c0316cf1 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -328,6 +328,28 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(np.exp(preds), warp_preds, decimal=4) + def test_warped_gp_cubic_sine(self, max_iters=100): + """ + A test replicating the cubic sine regression problem from + Snelson's paper. This test doesn't have any assertions, it's + just to ensure coverage of the tanh warping function code. + """ + X = (2 * np.pi) * np.random.random(151) - np.pi + Y = np.sin(X) + np.random.normal(0,0.2,151) + Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) + X = X[:, None] + Y = Y[:, None] + + warp_k = GPy.kern.RBF(1) + warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) + warp_m = GPy.models.WarpedGP(X, Y, kernel=warp_k, warping_function=warp_f) + warp_m['.*\.d'].constrain_fixed(1.0) + warp_m.optimize_restarts(parallel=False, robust=False, num_restarts=5, + max_iters=max_iters) + print warp_m + warp_m.predict(X) + + class GradientTests(np.testing.TestCase): def setUp(self): From ff700bbebf055bb842c8fc890085e591f465461b Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Thu, 7 Apr 2016 18:07:13 +0100 Subject: [PATCH 22/28] changed imports to relative ones --- GPy/models/warped_gp.py | 5 +++-- GPy/plotting/gpy_plot/plot_util.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 2e93ddd5..aa9e8c90 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -2,11 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from ..util.warping_functions import * +#from ..util.warping_functions import * from ..core import GP from .. 
import likelihoods from paramz import ObsAr -from GPy.util.warping_functions import TanhFunction +#from GPy.util.warping_functions import TanhFunction +from ..util.warping_functions import TanhFunction from GPy import kern class WarpedGP(GP): diff --git a/GPy/plotting/gpy_plot/plot_util.py b/GPy/plotting/gpy_plot/plot_util.py index 237e87cd..74e190d9 100644 --- a/GPy/plotting/gpy_plot/plot_util.py +++ b/GPy/plotting/gpy_plot/plot_util.py @@ -31,7 +31,7 @@ import numpy as np from scipy import sparse import itertools -from GPy.models import WarpedGP +from ...models import WarpedGP def in_ipynb(): try: From 2d9bdfdbfb9eaa552134c8bf79a84e5b2b1027da Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Thu, 7 Apr 2016 18:41:48 +0100 Subject: [PATCH 23/28] added tests for closed inverse in identity and log --- GPy/testing/model_tests.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index c0316cf1..144c6adf 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -301,11 +301,20 @@ class MiscTests(unittest.TestCase): warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.IdentityFunction(closed_inverse=False) - warp_m = GPy.models.WarpedGP(self.X, self.Y, kernel=warp_k, warping_function=warp_f) + warp_m = GPy.models.WarpedGP(self.X, self.Y, kernel=warp_k, + warping_function=warp_f) warp_m.optimize() warp_preds = warp_m.predict(self.X) + + warp_k_exact = GPy.kern.RBF(1) + warp_f_exact = GPy.util.warping_functions.IdentityFunction() + warp_m_exact = GPy.models.WarpedGP(self.X, self.Y, kernel=warp_k_exact, + warping_function=warp_f_exact) + warp_m_exact.optimize() + warp_preds_exact = warp_m_exact.predict(self.X) np.testing.assert_almost_equal(preds, warp_preds, decimal=4) + np.testing.assert_almost_equal(preds, warp_preds_exact, decimal=4) def test_warped_gp_log(self): """ @@ -322,11 +331,20 @@ class MiscTests(unittest.TestCase): warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.LogFunction(closed_inverse=False) - warp_m = GPy.models.WarpedGP(self.X, Y, kernel=warp_k, warping_function=warp_f) + warp_m = GPy.models.WarpedGP(self.X, Y, kernel=warp_k, + warping_function=warp_f) warp_m.optimize() warp_preds = warp_m.predict(self.X, median=True)[0] + + warp_k_exact = GPy.kern.RBF(1) + warp_f_exact = GPy.util.warping_functions.LogFunction() + warp_m_exact = GPy.models.WarpedGP(self.X, Y, kernel=warp_k_exact, + warping_function=warp_f_exact) + warp_m_exact.optimize(messages=True) + warp_preds_exact = warp_m_exact.predict(self.X, median=True)[0] np.testing.assert_almost_equal(np.exp(preds), warp_preds, decimal=4) + np.testing.assert_almost_equal(np.exp(preds), warp_preds_exact, decimal=4) def test_warped_gp_cubic_sine(self, max_iters=100): """ From e17ee1078bf0b08f159164ee8461a1c4ffdb1e25 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Thu, 7 Apr 2016 18:44:55 +0100 Subject: [PATCH 24/28] removed the check on f(y), it was only useful in logtanh --- GPy/util/warping_functions.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 7bc0050f..6b8b73e9 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -46,11 +46,7 @@ class WarpingFunction(Parameterized): it = 0 update = np.inf while np.abs(update).sum() > 1e-10 and it < max_iterations: - try: - fy = self.f(y) - except ValueError: - sys.stderr.write("One of y's is negative! 
Stopping inverse calculation.\n") - break + fy = self.f(y) fgrady = self.fgrad_y(y) update = (fy - z) / fgrady y -= self.rate * update From 28ee0f7aca87ecdb37714ffaa8ad71945099ddd8 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Mon, 11 Apr 2016 16:22:19 +0100 Subject: [PATCH 25/28] improving coverage and removing py2 print --- GPy/testing/model_tests.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 9803c62f..ef6e34cb 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -419,14 +419,16 @@ class MiscTests(unittest.TestCase): X = X[:, None] Y = Y[:, None] - warp_k = GPy.kern.RBF(1) - warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2) - warp_m = GPy.models.WarpedGP(X, Y, kernel=warp_k, warping_function=warp_f) + warp_m = GPy.models.WarpedGP(X, Y)#, kernel=warp_k)#, warping_function=warp_f) warp_m['.*\.d'].constrain_fixed(1.0) warp_m.optimize_restarts(parallel=False, robust=False, num_restarts=5, max_iters=max_iters) - print warp_m warp_m.predict(X) + warp_m.predict_quantiles(X) + warp_m.log_predictive_density(X, Y) + warp_m.predict_in_warped_space = False + warp_m.predict(X) + warp_m.predict_quantiles(X) class GradientTests(np.testing.TestCase): From b8867f1552c05244dcd5ba38a7a57b6f1056312c Mon Sep 17 00:00:00 2001 From: mzwiessele Date: Tue, 16 Aug 2016 11:12:52 +0100 Subject: [PATCH 26/28] [kern] Add kernel was swallowing parts #fix #412 --- GPy/kern/src/add.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/GPy/kern/src/add.py b/GPy/kern/src/add.py index 99fe809b..ca20e5a9 100644 --- a/GPy/kern/src/add.py +++ b/GPy/kern/src/add.py @@ -13,15 +13,21 @@ class Add(CombinationKernel): propagates gradients through. This kernel will take over the active dims of it's subkernels passed in. + + NOTE: The subkernels will be copies of the original kernels, to prevent + unexpected behavior. """ def __init__(self, subkerns, name='sum'): - for i, kern in enumerate(subkerns[:]): + _newkerns = [] + for kern in subkerns: if isinstance(kern, Add): - del subkerns[i] - for part in kern.parts[::-1]: + for part in kern.parts: #kern.unlink_parameter(part) - subkerns.insert(i, part.copy()) - super(Add, self).__init__(subkerns, name) + _newkerns.append(part.copy()) + else: + _newkerns.append(kern.copy()) + + super(Add, self).__init__(_newkerns, name) self._exact_psicomp = self._check_exact_psicomp() def _check_exact_psicomp(self): From 0abb9b835ffeb020410bdf9a1e0532139ffa5cfc Mon Sep 17 00:00:00 2001 From: mzwiessele Date: Tue, 16 Aug 2016 11:13:32 +0100 Subject: [PATCH 27/28] Revert "[kern] Add kernel was swallowing parts #fix #412" This reverts commit b8867f1552c05244dcd5ba38a7a57b6f1056312c. --- GPy/kern/src/add.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/GPy/kern/src/add.py b/GPy/kern/src/add.py index ca20e5a9..99fe809b 100644 --- a/GPy/kern/src/add.py +++ b/GPy/kern/src/add.py @@ -13,21 +13,15 @@ class Add(CombinationKernel): propagates gradients through. This kernel will take over the active dims of it's subkernels passed in. - - NOTE: The subkernels will be copies of the original kernels, to prevent - unexpected behavior. 
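The #412 bug is the classic delete-while-iterating pattern: del subkerns[i] plus multiple inserts shifts later elements, so the indices from the enumerated copy stop lining up with the mutated list, and a later Add's deletion removes the wrong element. A minimal reproduction with plain lists (nested lists stand in for nested Add kernels):

kerns = [['a', 'b'], 'c', ['d', 'e']]   # two nested "sums" around 'c'
for i, k in enumerate(kerns[:]):
    if isinstance(k, list):             # flatten a nested "Add"
        del kerns[i]                    # stale index after the first splice
        for part in k[::-1]:
            kerns.insert(i, part)
print(kerns)  # ['a', 'b', 'd', 'e', ['d', 'e']] -- 'c' was swallowed

The copy-based rebuild in the patch sidesteps this entirely by constructing a fresh list instead of mutating the one being iterated.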
""" def __init__(self, subkerns, name='sum'): - _newkerns = [] - for kern in subkerns: + for i, kern in enumerate(subkerns[:]): if isinstance(kern, Add): - for part in kern.parts: + del subkerns[i] + for part in kern.parts[::-1]: #kern.unlink_parameter(part) - _newkerns.append(part.copy()) - else: - _newkerns.append(kern.copy()) - - super(Add, self).__init__(_newkerns, name) + subkerns.insert(i, part.copy()) + super(Add, self).__init__(subkerns, name) self._exact_psicomp = self._check_exact_psicomp() def _check_exact_psicomp(self): From 27d49bbe015435b77b791384407c4dd012491ee8 Mon Sep 17 00:00:00 2001 From: mzwiessele Date: Tue, 16 Aug 2016 11:14:54 +0100 Subject: [PATCH 28/28] Revert "Revert "[kern] Add kernel was swallowing parts #fix #412"" This reverts commit 0abb9b835ffeb020410bdf9a1e0532139ffa5cfc. --- GPy/kern/src/add.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/GPy/kern/src/add.py b/GPy/kern/src/add.py index 99fe809b..ca20e5a9 100644 --- a/GPy/kern/src/add.py +++ b/GPy/kern/src/add.py @@ -13,15 +13,21 @@ class Add(CombinationKernel): propagates gradients through. This kernel will take over the active dims of it's subkernels passed in. + + NOTE: The subkernels will be copies of the original kernels, to prevent + unexpected behavior. """ def __init__(self, subkerns, name='sum'): - for i, kern in enumerate(subkerns[:]): + _newkerns = [] + for kern in subkerns: if isinstance(kern, Add): - del subkerns[i] - for part in kern.parts[::-1]: + for part in kern.parts: #kern.unlink_parameter(part) - subkerns.insert(i, part.copy()) - super(Add, self).__init__(subkerns, name) + _newkerns.append(part.copy()) + else: + _newkerns.append(kern.copy()) + + super(Add, self).__init__(_newkerns, name) self._exact_psicomp = self._check_exact_psicomp() def _check_exact_psicomp(self):