added a log warping function

This commit is contained in:
Daniel Beck 2016-02-22 19:57:54 +00:00
parent 24e9d68a19
commit c129900768
2 changed files with 66 additions and 14 deletions

View file

@ -44,17 +44,19 @@ class WarpedGP(GP):
super(WarpedGP, self).parameters_changed()
Kiy = self.posterior.woodbury_vector.flatten()
self.warping_function.update_grads(self.Y_untransformed, Kiy)
grad_y = self.warping_function.fgrad_y(self.Y_untransformed)
grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed,
return_covar_chain=True)
djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0)
dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0)
#grad_y = self.warping_function.fgrad_y(self.Y_untransformed)
#grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed,
# return_covar_chain=True)
#djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0)
#dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0)
warping_grads = -dquad_dpsi + djac_dpsi
#warping_grads = -dquad_dpsi + djac_dpsi
self.warping_function.psi.gradient[:] = warping_grads[:, :-1]
self.warping_function.d.gradient[:] = warping_grads[0, -1]
#self.warping_function.psi.gradient[:] = warping_grads[:, :-1]
#self.warping_function.d.gradient[:] = warping_grads[0, -1]
def transform_data(self):
Y = self.warping_function.f(self.Y_untransformed.copy()).copy()
@ -160,7 +162,7 @@ class WarpedGP(GP):
mu_star, var_star = self._raw_predict(x_test)
fy = self.warping_function.f(y_test)
ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata)
return ll_lpd - np.log(self.warping_function.fgrad_y(y_test))
return ll_lpd + np.log(self.warping_function.fgrad_y(y_test))
if __name__ == '__main__':

View file

@ -278,6 +278,52 @@ class TanhWarpingFunction_d(WarpingFunction):
names.append('warp_tanh_d')
return names
def update_grads(self, Y_untransformed, Kiy):
    """Push the warping-parameter gradients into psi and d.

    Kiy is the flattened Woodbury vector (K^-1 y); the gradient of the
    log-likelihood combines a quadratic-form term with the log-Jacobian
    term contributed by the warp.
    """
    dy = self.fgrad_y(Y_untransformed)
    dy_dpsi, dcov_dpsi = self.fgrad_y_psi(Y_untransformed,
                                          return_covar_chain=True)
    # log-Jacobian term: grad_y_psi / grad_y, summed over both data axes
    jacobian_term = (dy_dpsi / dy[:, :, None, None]).sum(axis=(0, 1))
    # quadratic term: Kiy-weighted sum of the covariance-chain gradients
    quadratic_term = (Kiy[:, None, None, None] * dcov_dpsi).sum(axis=(0, 1))
    warping_grads = jacobian_term - quadratic_term
    # last column holds the gradient for d; the remaining columns are psi's
    self.psi.gradient[:] = warping_grads[:, :-1]
    self.d.gradient[:] = warping_grads[0, -1]
class LogFunction(WarpingFunction):
    """
    Easy wrapper for applying a fixed log warping function to
    positive-only values: f(y) = log(y), f^-1(z) = exp(z).

    The warp has no trainable parameters, so update_grads and
    fgrad_y_psi are no-ops.
    """
    def __init__(self):
        # No psi/d parameters are created or linked; the warp is fixed.
        self.num_parameters = 0
        super(LogFunction, self).__init__(name='log')

    def f(self, y):
        """Warp: natural log of y. Assumes y is strictly positive."""
        return np.log(y)

    def fgrad_y(self, y):
        """Derivative of the warp w.r.t. y: d/dy log(y) = 1/y."""
        return 1. / y

    def update_grads(self, Y_untransformed, Kiy):
        # No trainable parameters, so there is nothing to update.
        pass

    def fgrad_y_psi(self, y, return_covar_chain=False):
        # No warp parameters: the gradient is the scalar 0, which
        # broadcasts safely in any downstream arithmetic.
        # (The previous np.zeros(..., len(self.psi), ...) line would
        # have raised AttributeError — self.psi does not exist here —
        # and was immediately overwritten anyway.)
        gradients = 0
        if return_covar_chain:
            return gradients, gradients
        return gradients

    def f_inv(self, z, y=None):
        """Inverse warp: exp(z). `y` is accepted for API compatibility
        with the other warping functions and ignored."""
        return np.exp(z)
class IdentityFunction(WarpingFunction):
"""
@ -285,12 +331,12 @@ class IdentityFunction(WarpingFunction):
and should not be used in practice.
"""
def __init__(self):
    """Identity warp: no trainable parameters.

    The psi/d Params of the tanh warp are deliberately not created or
    linked here — the identity warp is fixed, so the optimiser has
    nothing to tune.
    """
    self.num_parameters = 0
    super(IdentityFunction, self).__init__(name='identity')
def f(self, y):
@ -299,8 +345,12 @@ class IdentityFunction(WarpingFunction):
def fgrad_y(self, y):
    """df/dy of the identity warp: an all-ones float array of y's shape."""
    return np.full(y.shape, 1.0)
def update_grads(self, Y_untransformed, Kiy):
    # The identity warp has no trainable parameters, so there is
    # nothing to update here.
    pass
def fgrad_y_psi(self, y, return_covar_chain=False):
    """Gradient of fgrad_y w.r.t. the warp parameters.

    The identity warp has no parameters, so the gradient is the scalar
    0 (broadcastable against anything the caller combines it with).
    When return_covar_chain is True, the covariance-chain gradient
    (also zero) is returned as a second value.
    """
    # The old np.zeros((..., len(self.psi), 4)) line is removed: this
    # class no longer defines self.psi, so reaching it would raise
    # AttributeError, and its result was immediately overwritten anyway.
    gradients = 0
    if return_covar_chain:
        return gradients, gradients
    return gradients