From 9039fae29e5fce00c5fd49beedc0fc041f2a7ef6 Mon Sep 17 00:00:00 2001
From: beckdaniel
Date: Wed, 24 Feb 2016 11:24:31 +0000
Subject: [PATCH] deleted old tanh_warp and renamed warp_tanh_d to warp_tanh

---
 GPy/models/warped_gp.py       |   4 +-
 GPy/testing/model_tests.py    |   2 +-
 GPy/util/warping_functions.py | 105 +---------------------------------
 3 files changed, 5 insertions(+), 106 deletions(-)

diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py
index d040f2d4..50be6467 100644
--- a/GPy/models/warped_gp.py
+++ b/GPy/models/warped_gp.py
@@ -5,7 +5,7 @@ import numpy as np
 from ..util.warping_functions import *
 from ..core import GP
 from .. import likelihoods
-from GPy.util.warping_functions import TanhWarpingFunction_d
+from GPy.util.warping_functions import TanhWarpingFunction
 from GPy import kern
 
 class WarpedGP(GP):
@@ -15,7 +15,7 @@ class WarpedGP(GP):
             kernel = kern.RBF(X.shape[1])
 
         if warping_function == None:
-            self.warping_function = TanhWarpingFunction_d(warping_terms)
+            self.warping_function = TanhWarpingFunction(warping_terms)
             self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1) * 1)
         else:
             self.warping_function = warping_function
diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py
index d637a8c6..3ced78f2 100644
--- a/GPy/testing/model_tests.py
+++ b/GPy/testing/model_tests.py
@@ -319,7 +319,7 @@ class MiscTests(unittest.TestCase):
         import matplotlib.pyplot as plt
 
         warp_k = GPy.kern.RBF(1)
-        warp_f = GPy.util.warping_functions.TanhWarpingFunction_d(n_terms=2)
+        warp_f = GPy.util.warping_functions.TanhWarpingFunction(n_terms=2)
         warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f)
         m = GPy.models.GPRegression(X[:, None], Y[:, None])
diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py
index a2f0dfa8..dd41e5ee 100644
--- a/GPy/util/warping_functions.py
+++ b/GPy/util/warping_functions.py
@@ -51,107 +51,6 @@ class WarpingFunction(Parameterized):
 
 class TanhWarpingFunction(WarpingFunction):
 
-    def __init__(self, n_terms=3):
-        """n_terms specifies the number of tanh terms to be used"""
-        self.n_terms = n_terms
-        self.num_parameters = 3 * self.n_terms
-        super(TanhWarpingFunction, self).__init__(name='warp_tanh')
-
-    def f(self, y, psi):
-        """
-        transform y with f using parameter vector psi
-        psi = [[a,b,c]]
-        ::math::`f = \\sum_{terms} a * tanh(b*(y+c))`
-        """
-        #1. check that number of params is consistent
-        assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'
-        assert psi.shape[1] == 3, 'inconsistent parameter dimensions'
-
-        #2. exponentiate the a and b (positive!)
-        mpsi = psi.copy()
-
-        #3. transform data
-        z = y.copy()
-        for i in range(len(mpsi)):
-            a,b,c = mpsi[i]
-            z += a*np.tanh(b*(y+c))
-        return z
-
-    def f_inv(self, y, psi, iterations=10):
-        """
-        calculate the numerical inverse of f
-        :param iterations: number of N.R. iterations
-        """
-
-        y = y.copy()
-        z = np.ones_like(y)
-        for i in range(iterations):
-            z -= (self.f(z, psi) - y)/self.fgrad_y(z,psi)
-        return z
-
-    def fgrad_y(self, y, psi, return_precalc=False):
-        """
-        gradient of f w.r.t to y ([N x 1])
-        returns: Nx1 vector of derivatives, unless return_precalc is true,
-        then it also returns the precomputed stuff
-        """
-
-        mpsi = psi.copy()
-
-        # vectorized version
-
-        # S = (mpsi[:,1]*(y + mpsi[:,2])).T
-        S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T
-        R = np.tanh(S)
-        D = 1-R**2
-
-        # GRAD = (1+(mpsi[:,0:1]*mpsi[:,1:2]*D).sum(axis=0))[:,np.newaxis]
-        GRAD = (1+(mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T
-
-        if return_precalc:
-            # return GRAD,S.sum(axis=1),R.sum(axis=1),D.sum(axis=1)
-            return GRAD, S, R, D
-
-        return GRAD
-
-    def fgrad_y_psi(self, y, psi, return_covar_chain=False):
-        """
-        gradient of f w.r.t to y and psi
-        returns: NxIx3 tensor of partial derivatives
-        """
-
-        # 1. exponentiate the a and b (positive!)
-        mpsi = psi.copy()
-        w, s, r, d = self.fgrad_y(y, psi, return_precalc = True)
-
-        gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 3))
-        for i in range(len(mpsi)):
-            a,b,c = mpsi[i]
-            gradients[:,:,i,0] = (b*(1.0/np.cosh(s[i]))**2).T
-            gradients[:,:,i,1] = a*(d[i] - 2.0*s[i]*r[i]*(1.0/np.cosh(s[i]))**2).T
-            gradients[:,:,i,2] = (-2.0*a*(b**2)*r[i]*((1.0/np.cosh(s[i]))**2)).T
-
-        if return_covar_chain:
-            covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 3))
-
-            for i in range(len(mpsi)):
-                a,b,c = mpsi[i]
-                covar_grad_chain[:, :, i, 0] = (r[i]).T
-                covar_grad_chain[:, :, i, 1] = (a*(y + c) * ((1.0/np.cosh(s[i]))**2).T)
-                covar_grad_chain[:, :, i, 2] = a*b*((1.0/np.cosh(s[i]))**2).T
-
-            return gradients, covar_grad_chain
-
-        return gradients
-
-    def _get_param_names(self):
-        variables = ['a', 'b', 'c']
-        names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
-        return names
-
-
-class TanhWarpingFunction_d(WarpingFunction):
-
     def __init__(self, n_terms=3, initial_y=None):
         """
         n_terms specifies the number of tanh terms to be used
@@ -160,7 +59,7 @@ class TanhWarpingFunction_d(WarpingFunction):
         self.num_parameters = 3 * self.n_terms + 1
         self.psi = np.ones((self.n_terms, 3))
 
-        super(TanhWarpingFunction_d, self).__init__(name='warp_tanh')
+        super(TanhWarpingFunction, self).__init__(name='warp_tanh')
 
         self.psi = Param('psi', self.psi)
         self.psi[:, :2].constrain_positive()
@@ -271,7 +170,7 @@ class TanhWarpingFunction_d(WarpingFunction):
         variables = ['a', 'b', 'c', 'd']
         names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
-        names.append('warp_tanh_d')
+        names.append('warp_tanh')
         return names
 
     def update_grads(self, Y_untransformed, Kiy):
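
Note (not part of the patch): after this change the derivative-augmented tanh warp is only exposed under the name TanhWarpingFunction, so any downstream code that imported TanhWarpingFunction_d needs the one-line rename shown in the updated test above. A minimal usage sketch mirroring that test follows; the toy X, Y data and the optimize() call are illustrative assumptions, not taken from the patch:

    import numpy as np
    import GPy

    # Toy 1-D regression data (illustrative only, not from the patch).
    X = np.linspace(0, 1, 50)
    Y = np.sin(2 * np.pi * X) + 0.1 * np.random.randn(50)

    # Post-rename API: build the warp under its new name and pass it to WarpedGP,
    # as the updated GPy/testing/model_tests.py does.
    warp_k = GPy.kern.RBF(1)
    warp_f = GPy.util.warping_functions.TanhWarpingFunction(n_terms=2)
    warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k,
                                 warping_function=warp_f)
    warp_m.optimize()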