Refactored the numeric inverse into the parent class, to test Identity and Log

This commit is contained in:
beckdaniel 2016-03-02 17:05:22 +00:00
parent 7bee3daac8
commit 6bea908234
2 changed files with 43 additions and 48 deletions

View file

@ -300,12 +300,12 @@ class MiscTests(unittest.TestCase):
preds = m.predict(self.X)
warp_k = GPy.kern.RBF(1)
warp_f = GPy.util.warping_functions.IdentityFunction()
warp_f = GPy.util.warping_functions.IdentityFunction(closed_inverse=False)
warp_m = GPy.models.WarpedGP(self.X, self.Y, kernel=warp_k, warping_function=warp_f)
warp_m.optimize()
warp_preds = warp_m.predict(self.X)
np.testing.assert_almost_equal(preds, warp_preds)
np.testing.assert_almost_equal(preds, warp_preds, decimal=4)
def test_warped_gp_log(self):
"""
@ -316,21 +316,16 @@ class MiscTests(unittest.TestCase):
Y = np.abs(self.Y)
logY = np.log(Y)
m = GPy.models.GPRegression(self.X, logY, kernel=k)
#m.optimize()
m['Gaussian_noise.variance'] = 1e-4
m.optimize()
preds = m.predict(self.X)[0]
warp_k = GPy.kern.RBF(1)
warp_f = GPy.util.warping_functions.LogFunction()
warp_f = GPy.util.warping_functions.LogFunction(closed_inverse=False)
warp_m = GPy.models.WarpedGP(self.X, Y, kernel=warp_k, warping_function=warp_f)
warp_m.optimize()
warp_m['.*'] = 1.0
warp_m['Gaussian_noise.variance'] = 1e-4
warp_preds = warp_m.predict(self.X, median=True)[0]
#print np.exp(preds)
#print warp_preds
np.testing.assert_almost_equal(np.exp(preds), warp_preds)
np.testing.assert_almost_equal(np.exp(preds), warp_preds, decimal=4)
@unittest.skip('Comment this to plot the modified sine function')
def test_warped_gp_sine(self):
@ -354,19 +349,13 @@ class MiscTests(unittest.TestCase):
print(warp_m['.*warp.*'])
warp_m.predict_in_warped_space = False
warp_m.plot()
import ipdb; ipdb.set_trace()
warp_m.predict_in_warped_space = True
warp_m.plot()
m.plot()
warp_f.plot(X.min()-10, X.max()+10)
plt.show()
class GradientTests(np.testing.TestCase):
def setUp(self):
######################################

View file

@ -14,6 +14,7 @@ class WarpingFunction(Parameterized):
def __init__(self, name):
super(WarpingFunction, self).__init__(name=name)
self.rate = 0.1
def f(self, y, psi):
"""function transformation
@ -29,9 +30,30 @@ class WarpingFunction(Parameterized):
"""gradient of f w.r.t to y"""
raise NotImplementedError
def f_inv(self, z, psi):
"""inverse function transformation"""
raise NotImplementedError
def f_inv(self, z, max_iterations=100, y=None):
    """
    Calculate the numerical inverse of f via damped Newton-Raphson
    iteration on f(y) = z. This should be overwritten for specific
    warping functions where the inverse can be found in closed form.

    :param z: warped values to invert (array-like)
    :param max_iterations: maximum number of N.R. iterations
    :param y: optional initial guess for the unwarped values; defaults
        to an array of ones shaped like z. (Bug fix: previously the
        argument was accepted but unconditionally overwritten.)
    :returns: array y such that f(y) is approximately z
    """
    z = np.asarray(z)
    if y is None:
        # Use a float dtype so the in-place update below cannot fail
        # with an integer-casting error when z is an integer array.
        y = np.ones_like(z, dtype=float)
    else:
        # Honor the caller-supplied initial guess without mutating it.
        y = np.asarray(y, dtype=float).copy()
    it = 0
    update = np.inf
    fy = self.f(y)  # ensure fy is defined even if max_iterations == 0
    while np.abs(update).sum() > 1e-10 and it < max_iterations:
        fy = self.f(y)
        fgrady = self.fgrad_y(y)
        # Damped Newton step; self.rate is set in __init__.
        update = (fy - z) / fgrady
        y -= self.rate * update
        it += 1
    if it == max_iterations:
        print("WARNING!!! Maximum number of iterations reached in f_inv ")
        print("Sum of roots: %.4f" % np.sum(fy - z))
    return y
def _get_param_names(self):
raise NotImplementedError
@ -70,7 +92,6 @@ class TanhFunction(WarpingFunction):
self.link_parameter(self.psi)
self.link_parameter(self.d)
self.initial_y = initial_y
self.rate = 0.1
def f(self, y):
"""
@ -87,29 +108,6 @@ class TanhFunction(WarpingFunction):
z += a * np.tanh(b * (y + c))
return z
def f_inv(self, z, max_iterations=100, y=None):
"""
calculate the numerical inverse of f
:param max_iterations: maximum number of N.R. iterations
"""
z = z.copy()
y = np.ones_like(z)
it = 0
update = np.inf
while np.abs(update).sum() > 1e-10 and it < max_iterations:
fy = self.f(y)
fgrady = self.fgrad_y(y)
update = (fy - z) / fgrady
y -= self.rate * update
it += 1
if it == max_iterations:
print("WARNING!!! Maximum number of iterations reached in f_inv ")
print("Sum of roots: %.4f" % np.sum(fy - z))
return y
def fgrad_y(self, y, return_precalc=False):
"""
gradient of f w.r.t to y ([N x 1])
@ -183,12 +181,16 @@ class TanhFunction(WarpingFunction):
class LogFunction(WarpingFunction):
"""
Easy wrapper for applying a fixed warping function to
Easy wrapper for applying a fixed log warping function to
positive-only values.
The closed_inverse flag should only be set to False for
debugging and testing purposes.
"""
def __init__(self):
def __init__(self, closed_inverse=True):
self.num_parameters = 0
super(LogFunction, self).__init__(name='log')
if closed_inverse:
self.f_inv = self._f_inv
def f(self, y):
return np.log(y)
@ -204,7 +206,7 @@ class LogFunction(WarpingFunction):
return 0, 0
return 0
def f_inv(self, z, y=None):
def _f_inv(self, z, y=None):
return np.exp(z)
@ -212,10 +214,14 @@ class IdentityFunction(WarpingFunction):
"""
Identity warping function. This is for testing and sanity check purposes
and should not be used in practice.
The closed_inverse flag should only be set to False for
debugging and testing purposes.
"""
def __init__(self):
def __init__(self, closed_inverse=True):
self.num_parameters = 0
super(IdentityFunction, self).__init__(name='identity')
if closed_inverse:
self.f_inv = self._f_inv
def f(self, y):
return y
@ -231,6 +237,6 @@ class IdentityFunction(WarpingFunction):
return 0, 0
return 0
def f_inv(self, z, y=None):
def _f_inv(self, z, y=None):
return z