diff --git a/GPy/examples/warped_GP_demo.py b/GPy/examples/warped_GP_demo.py
index 3b75694a..1fd8b105 100644
--- a/GPy/examples/warped_GP_demo.py
+++ b/GPy/examples/warped_GP_demo.py
@@ -22,7 +22,7 @@
 Zmin = Z.min()
 Z = (Z-Zmin)/(Zmax-Zmin) - 0.5
 m = GPy.models.warpedGP(X, Z, warping_terms = 2)
-m.constrain_positive('(tanh_a|tanh_b|rbf|white|bias)')
+m.constrain_positive('(tanh_a|tanh_b|tanh_d|rbf|white|bias)')
 m.randomize()
 plt.figure()
 plt.xlabel('predicted f(Z)')
diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py
index bf5af21f..00eb94d8 100644
--- a/GPy/models/warped_GP.py
+++ b/GPy/models/warped_GP.py
@@ -20,10 +20,10 @@ class warpedGP(GP_regression):
     def __init__(self, X, Y, warping_function = None, warping_terms = 3, **kwargs):

         if warping_function == None:
-            self.warping_function = TanhWarpingFunction(warping_terms)
-            # self.warping_params = np.random.randn(self.warping_function.n_terms, 3)
-            self.warping_params = np.ones((self.warping_function.n_terms, 3))*0.0 # TODO better init
-            self.warp_params_shape = (self.warping_function.n_terms, 3) # todo get this from the subclass
+            self.warping_function = TanhWarpingFunction_d(warping_terms)
+            self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1)
+            # self.warping_params = np.ones((self.warping_function.n_terms*3 + 1,)) # TODO better init
+            # self.warp_params_shape = (self.warping_function.n_terms, 4) # todo get this from the subclass

         self.Z = Y.copy()
         self.N, self.D = Y.shape
@@ -31,7 +31,7 @@ class warpedGP(GP_regression):
         GP_regression.__init__(self, X, self.Y, **kwargs)

     def set_param(self, x):
-        self.warping_params = x[:self.warping_function.num_parameters].reshape(self.warp_params_shape).copy()
+        self.warping_params = x[:self.warping_function.num_parameters]
         self.transform_data()
         GP_regression.set_param(self, x[self.warping_function.num_parameters:].copy())

@@ -63,16 +63,18 @@ class warpedGP(GP_regression):
         ll_grads = GP_regression.log_likelihood_gradients(self)
         alpha = np.dot(self.Ki, self.Y.flatten())
         warping_grads = self.warping_function_gradients(alpha)
+
+        warping_grads = np.append(warping_grads[:,:-1].flatten(), warping_grads[0,-1])

         return np.hstack((warping_grads.flatten(), ll_grads.flatten()))

     def warping_function_gradients(self, Kiy):
         grad_y = self.warping_function.fgrad_y(self.Z, self.warping_params)
         grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Z, self.warping_params, return_covar_chain = True)
-
         djac_dpsi = ((1.0/grad_y[:,:, None, None])*grad_y_psi).sum(axis=0).sum(axis=0)
         dquad_dpsi = (Kiy[:,None,None,None] * grad_psi).sum(axis=0).sum(axis=0)
+
         return -dquad_dpsi + djac_dpsi

     def plot_warping(self):
diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py
index 5a6dc6b3..79d8d9d8 100644
--- a/GPy/util/warping_functions.py
+++ b/GPy/util/warping_functions.py
@@ -155,3 +155,118 @@
         variables = ['a', 'b', 'c']
         names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
         return names
+
+
+class TanhWarpingFunction_d(WarpingFunction):
+
+    def __init__(self, n_terms=3):
+        """n_terms specifies the number of tanh terms to be used"""
+        self.n_terms = n_terms
+        self.num_parameters = 3 * self.n_terms + 1
+
+    def f(self, y, psi):
+        """transform y with f using the flat parameter vector psi
+        psi = [a_1, b_1, c_1, ..., a_K, b_K, c_K, d]
+        f(y) = d*y + sum_k a_k * tanh(b_k*(y + c_k))
+        """
+
+        # check that the number of parameters is consistent
+        # assert psi.shape[0] == 3 * self.n_terms + 1, 'inconsistent parameter dimensions'
+        mpsi = psi.copy()
+        d = psi[-1]
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+
+        # transform the data
+        z = d*y.copy()
+        for i in range(len(mpsi)):
+            a, b, c = mpsi[i]
+            z += a*np.tanh(b*(y+c))
+        return z
+
+    def f_inv(self, y, psi, iterations=10):
+        """
+        calculate the numerical inverse of f by Newton-Raphson iteration
+
+        == input ==
+        iterations: number of Newton-Raphson iterations
+        """
+
+        y = y.copy()
+        z = np.ones_like(y)
+
+        for i in range(iterations):
+            z -= (self.f(z, psi) - y)/self.fgrad_y(z, psi)
+
+        return z
+
+    def fgrad_y(self, y, psi, return_precalc=False):
+        """
+        gradient of f w.r.t. y (N x D)
+        returns: N x D array of derivatives; if return_precalc is True,
+        the precomputed terms S, R and D are returned as well
+        """
+
+        mpsi = psi.copy()
+        d = psi[-1]
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+
+        # vectorized version
+        S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T
+        R = np.tanh(S)
+        D = 1 - R**2
+
+        GRAD = (d + (mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T
+
+        if return_precalc:
+            return GRAD, S, R, D
+
+        return GRAD
+
+    def fgrad_y_psi(self, y, psi, return_covar_chain=False):
+        """
+        gradient of fgrad_y w.r.t. the parameters psi
+
+        returns: N x D x n_terms x 4 tensor of partial derivatives
+        (and, if return_covar_chain is True, the corresponding gradients of f itself)
+        """
+
+        mpsi = psi.copy()
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+
+        # note: here d is the precomputed 1 - tanh(S)**2 term, not the linear slope
+        w, s, r, d = self.fgrad_y(y, psi, return_precalc=True)
+
+        gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
+        for i in range(len(mpsi)):
+            a, b, c = mpsi[i]
+            gradients[:,:,i,0] = (b*(1.0/np.cosh(s[i]))**2).T
+            gradients[:,:,i,1] = a*(d[i] - 2.0*s[i]*r[i]*(1.0/np.cosh(s[i]))**2).T
+            gradients[:,:,i,2] = (-2.0*a*(b**2)*r[i]*((1.0/np.cosh(s[i]))**2)).T
+        # the single shared slope d occupies one slot, stored in the first term
+        gradients[:,:,0,3] = 1.0
+
+        if return_covar_chain:
+            covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
+
+            for i in range(len(mpsi)):
+                a, b, c = mpsi[i]
+                covar_grad_chain[:, :, i, 0] = (r[i]).T
+                covar_grad_chain[:, :, i, 1] = a*(y + c) * ((1.0/np.cosh(s[i]))**2).T
+                covar_grad_chain[:, :, i, 2] = a*b*((1.0/np.cosh(s[i]))**2).T
+            covar_grad_chain[:, :, 0, 3] = y
+
+            return gradients, covar_grad_chain
+
+        return gradients
+
+    def get_param_names(self):
+        variables = ['a', 'b', 'c']
+        names = sum([['warp_tanh_%s_t%i' % (variables[n], q) for n in range(3)] for q in range(self.n_terms)], [])
+        names.append('warp_tanh_d')
+        return names
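
Reviewer note: a quick way to sanity-check the new warping function outside the model is to compare the analytic derivative from fgrad_y against a central finite difference of f. The snippet below is only an illustrative sketch, not part of the patch; it assumes the patched GPy.util.warping_functions module is importable and uses made-up random inputs.

# Illustrative check (not part of the patch): analytic df/dy vs. finite differences.
import numpy as np
from GPy.util.warping_functions import TanhWarpingFunction_d

np.random.seed(0)
warp = TanhWarpingFunction_d(n_terms=2)

# Flat parameter vector [a_1, b_1, c_1, a_2, b_2, c_2, d], as used by the patch.
psi = np.random.randn(warp.num_parameters)
y = np.random.randn(5, 1)                     # an N x D block of targets

grad = warp.fgrad_y(y, psi)                   # analytic df/dy, shape (5, 1)

eps = 1e-6
fd = (warp.f(y + eps, psi) - warp.f(y - eps, psi)) / (2.0 * eps)

print(np.allclose(grad, fd, atol=1e-5))       # expected: True

A similar check against fgrad_y_psi can be built by perturbing individual entries of psi one at a time.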