From 24e9d68a19560033d040d860fb044e5d6bc18480 Mon Sep 17 00:00:00 2001 From: beckdaniel Date: Thu, 14 Jan 2016 15:28:48 +0000 Subject: [PATCH] Fix WarpedGP.log_predictive_density (subtract log warp Jacobian) and make TanhWarpingFunction_d f_inv initialization configurable --- NOTE(review): this patch also leaves debugging leftovers in model_tests.py (a commented-out @unittest.skip, an `import ipdb; ipdb.set_trace()`, and a removed noise-variance constraint) that should be stripped before merging. GPy/models/warped_gp.py | 2 +- GPy/testing/model_tests.py | 4 ++-- GPy/util/warping_functions.py | 9 +++++++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 998c0ed6..443454a1 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -160,7 +160,7 @@ class WarpedGP(GP): mu_star, var_star = self._raw_predict(x_test) fy = self.warping_function.f(y_test) ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata) - return ll_lpd * self.warping_function.fgrad_y(y_test) + return ll_lpd - np.log(self.warping_function.fgrad_y(y_test)) if __name__ == '__main__': diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 7f983411..d637a8c6 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -307,7 +307,7 @@ class MiscTests(unittest.TestCase): np.testing.assert_almost_equal(preds, warp_preds) - @unittest.skip('Comment this to plot the modified sine function') + #@unittest.skip('Comment this to plot the modified sine function') def test_warped_gp_sine(self): """ A test replicating the sine regression problem from @@ -321,7 +321,6 @@ class MiscTests(unittest.TestCase): warp_k = GPy.kern.RBF(1) warp_f = GPy.util.warping_functions.TanhWarpingFunction_d(n_terms=2) warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f) - warp_m['.*noise.variance.*'].constrain_fixed(0.1) m = GPy.models.GPRegression(X[:, None], Y[:, None]) m.optimize_restarts(parallel=False, robust=True, num_restarts=5) @@ -330,6 +329,7 @@ class MiscTests(unittest.TestCase): print(warp_m['.*warp.*']) warp_m.predict_in_warped_space = False warp_m.plot() + import ipdb; ipdb.set_trace() warp_m.predict_in_warped_space = True warp_m.plot() m.plot() diff --git
a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index 516103bf..ce99a940 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -152,7 +152,7 @@ class TanhWarpingFunction(WarpingFunction): class TanhWarpingFunction_d(WarpingFunction): - def __init__(self, n_terms=3): + def __init__(self, n_terms=3, initial_y=None): """n_terms specifies the number of tanh terms to be used""" self.n_terms = n_terms self.num_parameters = 3 * self.n_terms + 1 @@ -165,6 +165,7 @@ class TanhWarpingFunction_d(WarpingFunction): self.d = Param('%s' % ('d'), 1.0, Logexp()) self.link_parameter(self.psi) self.link_parameter(self.d) + self.initial_y = initial_y def f(self, y): """ @@ -187,7 +188,7 @@ class TanhWarpingFunction_d(WarpingFunction): z += a*np.tanh(b*(y+c)) return z - def f_inv(self, z, max_iterations=1000, y=None): + def f_inv(self, z, max_iterations=100, y=None): """ calculate the numerical inverse of f @@ -195,12 +196,15 @@ class TanhWarpingFunction_d(WarpingFunction): """ z = z.copy() + if y is None: # The idea here is to initialize y with +1 where # z is positive and -1 where it is negative. # For negative z, Newton-Raphson diverges # if we initialize y with a positive value (and vice-versa). y = ((z > 0) * 1.) - (z <= 0) + if self.initial_y is not None: + y *= self.initial_y it = 0 update = np.inf @@ -213,6 +217,7 @@ class TanhWarpingFunction_d(WarpingFunction): it += 1 if it == max_iterations: print("WARNING!!! Maximum number of iterations reached in f_inv ") + print("Sum of updates: %.4f" % np.sum(update)) return y def fgrad_y(self, y, return_precalc=False):