mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-09 12:02:38 +02:00

added a rate to inverse calculation

This commit is contained in:
parent 91e625a9bd
commit 5534c45b0a

3 changed files with 173 additions and 23 deletions
@@ -68,22 +68,22 @@ class WarpedGP(GP):
         arg2 = np.ones(shape=gh_samples.shape).dot(mean.T)
         return self.warping_function.f_inv(arg1 + arg2, y=pred_init)

-    def _get_warped_mean(self, mean, std, pred_init=None, deg_gauss_hermite=100):
+    def _get_warped_mean(self, mean, std, pred_init=None, deg_gauss_hermite=20):
         """
         Calculate the warped mean by using Gauss-Hermite quadrature.
         """
         gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite)
-        gh_samples = gh_samples[:,None]
-        gh_weights = gh_weights[None,:]
+        gh_samples = gh_samples[:, None]
+        gh_weights = gh_weights[None, :]
         return gh_weights.dot(self._get_warped_term(mean, std, gh_samples)) / np.sqrt(np.pi)

-    def _get_warped_variance(self, mean, std, pred_init=None, deg_gauss_hermite=100):
+    def _get_warped_variance(self, mean, std, pred_init=None, deg_gauss_hermite=20):
         """
         Calculate the warped variance by using Gauss-Hermite quadrature.
         """
         gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite)
-        gh_samples = gh_samples[:,None]
-        gh_weights = gh_weights[None,:]
+        gh_samples = gh_samples[:, None]
+        gh_weights = gh_weights[None, :]
         arg1 = gh_weights.dot(self._get_warped_term(mean, std, gh_samples,
                                                     pred_init=pred_init) ** 2) / np.sqrt(np.pi)
         arg2 = self._get_warped_mean(mean, std, pred_init=pred_init,
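For context on this hunk: `_get_warped_mean` approximates the expectation of the inverse warp under the latent Gaussian predictive with Gauss-Hermite quadrature, and the commit drops the default degree from 100 to 20. A minimal standalone sketch of the same rule, assuming a hypothetical inverse warp of `np.exp` (the log warp's `f_inv`), chosen because the exact answer, the lognormal mean, is known in closed form and makes degree 20 easy to sanity-check:

import numpy as np

# Gauss-Hermite rule: E[g(t)] for t ~ N(mu, std^2) is approximately
# sum_i w_i * g(mu + sqrt(2)*std*x_i) / sqrt(pi).
def warped_mean(mu, std, deg=20):
    x, w = np.polynomial.hermite.hermgauss(deg)  # nodes x_i, weights w_i
    return w.dot(np.exp(mu + np.sqrt(2.0) * std * x)) / np.sqrt(np.pi)

mu, std = 0.3, 0.5
print(warped_mean(mu, std, deg=20))   # quadrature at the new default degree
print(np.exp(mu + 0.5 * std ** 2))    # closed-form lognormal mean; degree 20 already agrees

For a smooth integrand like this, degree 20 is essentially exact, which is presumably why the higher default was not worth its cost.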
@@ -91,7 +91,7 @@ class WarpedGP(GP):
         return arg1 - (arg2 ** 2)

     def predict(self, Xnew, which_parts='all', pred_init=None, full_cov=False, Y_metadata=None,
-                median=False, deg_gauss_hermite=100, likelihood=None):
+                median=False, deg_gauss_hermite=20, likelihood=None):
         """
         Prediction results depend on:
         - The value of the self.predict_in_warped_space flag
@@ -307,6 +307,31 @@ class MiscTests(unittest.TestCase):
         np.testing.assert_almost_equal(preds, warp_preds)

+    def test_warped_gp_log(self):
+        """
+        A WarpedGP with the log warping function should be
+        equal to a standard GP with log labels.
+        """
+        k = GPy.kern.RBF(1)
+        Y = np.abs(self.Y)
+        logY = np.log(Y)
+        m = GPy.models.GPRegression(self.X, logY, kernel=k)
+        #m.optimize()
+        m['Gaussian_noise.variance'] = 1e-4
+        preds = m.predict(self.X)[0]
+
+        warp_k = GPy.kern.RBF(1)
+        warp_f = GPy.util.warping_functions.LogFunction()
+        warp_m = GPy.models.WarpedGP(self.X, Y, kernel=warp_k, warping_function=warp_f)
+        warp_m.optimize()
+        warp_m['.*'] = 1.0
+        warp_m['Gaussian_noise.variance'] = 1e-4
+        warp_preds = warp_m.predict(self.X, median=True)[0]
+        #print np.exp(preds)
+        #print warp_preds
+
+        np.testing.assert_almost_equal(np.exp(preds), warp_preds)
+
     #@unittest.skip('Comment this to plot the modified sine function')
     def test_warped_gp_sine(self):
         """
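Note the new test predicts with median=True. The reason the median is the right comparison (not part of the commit, but easy to verify): a monotone warp maps quantiles through exactly, so for f(y) = log(y) the warped-space median of a N(mu, s2) latent predictive is exp(mu), matching exp of the plain GP's mean prediction, whereas the warped-space mean picks up an extra s2/2 term. A quick sampling check with arbitrary mu, s2:

import numpy as np

np.random.seed(0)
mu, s2 = 0.7, 0.3
z = np.random.normal(mu, np.sqrt(s2), 500000)   # latent predictive samples
print(np.median(np.exp(z)), np.exp(mu))         # ~equal: the median maps through the warp
print(np.exp(z).mean(), np.exp(mu + 0.5 * s2))  # the mean needs the +s2/2 correction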
@@ -316,12 +341,13 @@ class MiscTests(unittest.TestCase):
         X = (2 * np.pi) * np.random.random(151) - np.pi
         Y = np.sin(X) + np.random.normal(0,0.2,151)
         Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y])
-        Y = np.abs(Y)
+        #Y = np.abs(Y)

         import matplotlib.pyplot as plt
         warp_k = GPy.kern.RBF(1)
-        #warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2)
-        warp_f = GPy.util.warping_functions.LogisticFunction(n_terms=2)
+        warp_f = GPy.util.warping_functions.TanhFunction(n_terms=2)
+        #warp_f = GPy.util.warping_functions.LogisticFunction(n_terms=2)
+        #warp_f = GPy.util.warping_functions.LogitFunction(n_terms=1)
         warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f)

         m = GPy.models.GPRegression(X[:, None], Y[:, None])
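An aside on the label transform in this test: the `(1,-1)[y<0]` indexing trick is just a signed cube root. An equivalent, more readable spelling for reference (same behaviour, arbitrary sample values):

import numpy as np

Y = np.linspace(-1.0, 1.0, 5)
trick = np.array([np.power(abs(y), 1.0 / 3) * (1, -1)[y < 0] for y in Y])
plain = np.sign(Y) * np.abs(Y) ** (1.0 / 3)   # signed cube root, vectorized
print(np.allclose(trick, plain))              # True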
@@ -70,6 +70,7 @@ class TanhFunction(WarpingFunction):
         self.link_parameter(self.psi)
         self.link_parameter(self.d)
         self.initial_y = initial_y
+        self.rate = 0.1

     def f(self, y):
         """
@@ -94,28 +95,19 @@ class TanhFunction(WarpingFunction):
         """

         z = z.copy()

-        if y is None:
-            # The idea here is to initialize y with +1 where
-            # z is positive and -1 where it is negative.
-            # For negative z, Newton-Raphson diverges
-            # if we initialize y with a positive value (and vice-versa).
-            y = ((z > 0) * 1.) - (z <= 0)
-            if self.initial_y is not None:
-                y *= self.initial_y
+        y = np.ones_like(z)

         it = 0
         update = np.inf

-        while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations):
+        while np.abs(update).sum() > 1e-10 and it < max_iterations:
             fy = self.f(y)
             fgrady = self.fgrad_y(y)
             update = (fy - z) / fgrady
-            y -= update
+            y -= self.rate * update
             it += 1
         if it == max_iterations:
             print("WARNING!!! Maximum number of iterations reached in f_inv ")
             print("Sum of updates: %.4f" % np.sum(update))
             print("Sum of roots: %.4f" % np.sum(fy - z))
         return y

     def fgrad_y(self, y, return_precalc=False):
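This is the change the commit message refers to: the sign-based initialization is replaced by a constant start and the Newton-Raphson step is damped by self.rate. A minimal standalone sketch of the damped iteration (not GPy's TanhFunction; f here is a hypothetical monotone warp f(y) = y + tanh(y) chosen only so the derivative is simple):

import numpy as np

def f(y):                         # toy monotone warp, stand-in for the real one
    return y + np.tanh(y)

def fgrad(y):                     # its derivative, always in (1, 2]
    return 1.0 + 1.0 / np.cosh(y) ** 2

def f_inv(z, rate=0.1, max_iterations=1000):
    y = np.ones_like(z)           # constant initialization, as in the hunk above
    update = np.inf
    it = 0
    while np.abs(update).sum() > 1e-10 and it < max_iterations:
        update = (f(y) - z) / fgrad(y)
        y -= rate * update        # damped step: rate < 1 trades speed for stability
        it += 1
    return y, it

z = np.linspace(-3.0, 3.0, 7)
y, it = f_inv(z)
print(it, np.abs(f(y) - z).max())  # residual ~0 once converged

The trade-off is visible in the iteration count: an undamped step converges in a handful of iterations here, while rate = 0.1 contracts the error by roughly a factor 0.9 per step, so the damping buys robustness against overshooting at the cost of many more (cheap) iterations.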
@@ -391,3 +383,135 @@ class LogisticFunction(WarpingFunction):
         self.psi.gradient[:] = warping_grads[:, :-1]
         self.d.gradient[:] = warping_grads[0, -1]

+
+class LogitFunction(WarpingFunction):
+    """
+    A sum of logit functions.
+    """
+    def __init__(self, n_terms=1, initial_y=None):
+        """
+        n_terms specifies the number of logistic terms to be used
+        """
+        self.n_terms = n_terms
+        self.num_parameters = 3 * self.n_terms
+        self.psi = np.ones((self.n_terms, 3))
+        super(LogitFunction, self).__init__(name='warp_logit')
+        self.psi = Param('psi', self.psi)
+        self.psi[:, :2].constrain_positive()
+        self.link_parameter(self.psi)
+        self.initial_y = initial_y
+        self.e = 1e-5
+
+    def _logit(self, y):
+        a, b, c = self.psi[0]
+        y += self.e
+        return ((np.log(y) - np.log(a - y)) / b) + c
+
+    def f(self, y):
+        """
+        Transform y with f using parameter vector psi
+        psi = [[a,b,c]]
+
+        :math:`f = (y * d) + \\sum_{terms} a * logistic(b *(y + c))`
+        """
+        z = np.zeros_like(y)
+        for i in xrange(self.n_terms):
+            a, b, c = self.psi[i]
+            z += self._logit(y)
+        return z
+
+    def f_inv(self, z, max_iterations=100, y=None):
+        """
+        calculate the numerical inverse of f
+
+        :param max_iterations: maximum number of N.R. iterations
+        """
+        z = z.copy()
+        if y is None:
+            # The idea here is to initialize y with +1 where
+            # z is positive and -1 where it is negative.
+            # For negative z, Newton-Raphson diverges
+            # if we initialize y with a positive value (and vice-versa).
+            y = ((z > 0) * 1.) - (z <= 0)
+            if self.initial_y is not None:
+                y *= self.initial_y
+        it = 0
+        update = np.inf
+        while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations):
+            fy = self.f(y)
+            fgrady = self.fgrad_y(y)
+            update = (fy - z) / fgrady
+            y -= update
+            it += 1
+        if it == max_iterations:
+            print("WARNING!!! Maximum number of iterations reached in f_inv ")
+            print("Sum of updates: %.4f" % np.sum(update))
+        return y
+
+    def fgrad_y(self, y, return_precalc=False):
+        """
+        Gradient of f w.r.t to y ([N x 1])
+        This vectorized version calculates all summation terms
+        at the same time (since the grad of a sum is the sum of the grads).
+
+        :returns: Nx1 vector of derivatives, unless return_precalc is true,
+                  then it also returns the precomputed stuff
+        """
+        a, b, c = self.psi[0]
+
+        # vectorized version
+        # term = b * (y + c)
+        y += self.e
+        yb = y * b
+        ab = a * b
+        grad = (1. / yb) + (1. / (ab - yb))
+
+        if return_precalc:
+            return grad, yb, ab
+        return grad
+
+    def fgrad_y_psi(self, y, return_covar_chain=False):
+        """
+        gradient of f w.r.t to y and psi
+
+        :returns: NxIx4 tensor of partial derivatives
+        """
+        grad, yb, ab = self.fgrad_y(y, return_precalc=True)
+        gradients = np.zeros((y.shape[0], y.shape[1], self.n_terms, 3))
+        for i in xrange(self.n_terms):
+            a, b, c = self.psi[i]
+            gradients[:, :, i, 0] = -b / ((ab - yb) ** 2)
+            yb2 = y * (b ** 2)
+            ab2 = a * (b ** 2)
+            gradients[:, :, i, 1] = - (1. / yb2) - (1. / (ab2 - yb2))
+            #gradients[:, :, i, 2] = 0.0
+
+        if return_covar_chain:
+            covar_grad_chain = np.zeros((y.shape[0], y.shape[1], self.n_terms, 3))
+            for i in xrange(self.n_terms):
+                a, b, c = self.psi[i]
+                covar_grad_chain[:, :, i, 0] = - (1. / (ab - yb))
+                covar_grad_chain[:, :, i, 1] = - (np.log(y) - np.log(a - y)) / (b ** 2)
+                covar_grad_chain[:, :, i, 2] = 1.0
+            return gradients, covar_grad_chain
+
+        return gradients
+
+    def _get_param_names(self):
+        variables = ['a', 'b', 'c']
+        names = sum([['warp_logit_%s_t%i' % (variables[n], q) for n in range(3)]
+                     for q in range(self.n_terms)], [])
+        names.append('warp_logit')
+        return names
+
+    def update_grads(self, Y_untransformed, Kiy):
+        grad_y = self.fgrad_y(Y_untransformed)
+        grad_y_psi, grad_psi = self.fgrad_y_psi(Y_untransformed,
+                                                return_covar_chain=True)
+        djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0)
+        dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0)
+
+        warping_grads = -dquad_dpsi + djac_dpsi
+
+        self.psi.gradient[:] = warping_grads[:, :]
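A sanity check on the new class (not part of the commit): LogitFunction.fgrad_y returns 1/(y*b) + 1/(a*b - y*b), which should be the derivative of _logit's (log(y) - log(a - y))/b + c. A hedged finite-difference check with arbitrary parameter values, keeping y inside (0, a) where the logit is defined:

import numpy as np

a, b, c = 2.0, 1.5, 0.3   # arbitrary; a, b positive as the class constrains them

def f(y):                 # _logit without the self.e shift, for clarity
    return (np.log(y) - np.log(a - y)) / b + c

def fgrad(y):             # the analytic gradient from fgrad_y above
    yb, ab = y * b, a * b
    return 1.0 / yb + 1.0 / (ab - yb)

y = np.linspace(0.1, 1.9, 5)
eps = 1e-6
numeric = (f(y + eps) - f(y - eps)) / (2 * eps)
print(np.max(np.abs(numeric - fgrad(y))))  # ~1e-9: analytic gradient checks out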