Merge branch 'new_warping'

commit 580a864f76
Author: Nicolo Fusi
Date:   2013-04-10 12:27:07 +01:00
4 changed files with 204 additions and 47 deletions

View file

@@ -0,0 +1,52 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import scipy as sp
import pdb, sys, pickle
import matplotlib.pylab as plt
import GPy
np.random.seed(2)
N = 120
# sample inputs and outputs
X = np.random.uniform(-np.pi,np.pi,(N,1))
Y = np.sin(X)+np.random.randn(N,1)*0.05
Y += np.abs(Y.min()) + 0.5
Z = np.exp(Y)#Y**(1/3.0)
Zmax = Z.max()
Zmin = Z.min()
Z = (Z-Zmin)/(Zmax-Zmin) - 0.5
train = range(X.shape[0])[:100]
test = range(X.shape[0])[100:]
kernel = GPy.kern.rbf(1) + GPy.kern.bias(1)
m = GPy.models.warpedGP(X[train], Z[train], kernel=kernel, warping_terms = 2)
m.constrain_positive('(tanh_a|tanh_b|rbf|noise|bias)')
m.constrain_fixed('tanh_d', 1.0)
m.randomize()
plt.figure()
plt.xlabel('predicted f(Z)')
plt.ylabel('actual f(Z)')
plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'before training')
m.optimize(messages = True)
# m.optimize_restarts(4, parallel = True, messages = True)
plt.plot(m.likelihood.Y, Y[train], 'o', alpha = 0.5, label = 'after training')
plt.legend(loc = 0)
m.plot_warping()
plt.figure()
plt.title('warped GP fit')
m.plot()
m.optimize(messages=1)
plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Y[test].flatten(), 'x'); plt.title('prediction in unwarped space')
m.predict_in_warped_space = True
plt.figure(); plt.plot(m.predict(X[test])[0].flatten(), Z[test].flatten(), 'x'); plt.title('prediction in warped space')
m1 = GPy.models.GP_regression(X[train], Z[train])
m1.constrain_positive('(rbf|noise|bias)')
m1.randomize()
m1.optimize(messages = True)
plt.figure()
plt.title('GP fit')
m1.plot()
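As a point of reference, the demo builds its targets by pushing Y through an exponential and then rescaling, so the warping the model has to learn is (roughly, up to scale and shift) the inverse of that map. A small sketch, meant to be appended to the script above, reconstructs this ideal warping from the demo's own Z, Zmin and Zmax for comparison with the output of m.plot_warping():

import numpy as np
import matplotlib.pylab as plt
# Z was built as (exp(Y) - Zmin)/(Zmax - Zmin) - 0.5, so the warping the model
# should recover is approximately f(Z) = log((Z + 0.5)*(Zmax - Zmin) + Zmin).
z_grid = np.linspace(Z.min(), Z.max(), 200)
ideal_f = np.log((z_grid + 0.5)*(Zmax - Zmin) + Zmin)
plt.figure()
plt.plot(z_grid, ideal_f)
plt.title('ideal warping implied by the data-generating transform')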

View file

@@ -1,6 +1,7 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import kernpart
import numpy as np

View file

@@ -9,85 +9,74 @@ from ..util.linalg import pdinv
 from ..util.plot import gpplot
 from ..util.warping_functions import *
-from GP_regression import GP_regression
+from GP import GP
+from .. import likelihoods
+from .. import kern
-class warpedGP(GP_regression):
+class warpedGP(GP):
     """
     TODO: fecking docstrings!
     @nfusi: I've hacked a little on this, but no guarantees. J.
     """
-    def __init__(self, X, Y, warping_function = None, warping_terms = 3, **kwargs):
+    def __init__(self, X, Y, kernel=None, warping_function = None, warping_terms = 3, normalize_X=False, normalize_Y=False, Xslices=None):
+        if kernel is None:
+            kernel = kern.rbf(X.shape[1])
         if warping_function == None:
-            self.warping_function = TanhWarpingFunction(warping_terms)
-            # self.warping_params = np.random.randn(self.warping_function.n_terms, 3)
-            self.warping_params = np.ones((self.warping_function.n_terms, 3))*0.0 # TODO better init
-            self.warp_params_shape = (self.warping_function.n_terms, 3) # todo get this from the subclass
+            self.warping_function = TanhWarpingFunction_d(warping_terms)
+            self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1)
-        self.Z = Y.copy()
-        self.N, self.D = Y.shape
-        self.transform_data()
-        GP_regression.__init__(self, X, self.Y, **kwargs)
+        self.has_uncertain_inputs = False
+        self.Y_untransformed = Y.copy()
+        self.predict_in_warped_space = False
+        likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X, Xslices=Xslices)
     def _set_params(self, x):
-        self.warping_params = x[:self.warping_function.num_parameters].reshape(self.warp_params_shape).copy()
-        self.transform_data()
-        GP_regression._set_params(self, x[self.warping_function.num_parameters:].copy())
+        self.warping_params = x[:self.warping_function.num_parameters]
+        Y = self.transform_data()
+        self.likelihood.set_data(Y)
+        GP._set_params(self, x[self.warping_function.num_parameters:].copy())
     def _get_params(self):
-        return np.hstack((self.warping_params.flatten().copy(), GP_regression._get_params(self).copy()))
+        return np.hstack((self.warping_params.flatten().copy(), GP._get_params(self).copy()))
     def _get_param_names(self):
         warping_names = self.warping_function._get_param_names()
-        param_names = GP_regression._get_param_names(self)
+        param_names = GP._get_param_names(self)
         return warping_names + param_names
     def transform_data(self):
-        self.Y = self.warping_function.f(self.Z.copy(), self.warping_params).copy()
-        # this supports the 'smart' behaviour in GP_regression
-        if self.D > self.N:
-            self.YYT = np.dot(self.Y, self.Y.T)
-        else:
-            self.YYT = None
-        return self.Y
+        Y = self.warping_function.f(self.Y_untransformed.copy(), self.warping_params).copy()
+        return Y
     def log_likelihood(self):
-        ll = GP_regression.log_likelihood(self)
-        jacobian = self.warping_function.fgrad_y(self.Z, self.warping_params)
+        ll = GP.log_likelihood(self)
+        jacobian = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params)
         return ll + np.log(jacobian).sum()
     def _log_likelihood_gradients(self):
-        ll_grads = GP_regression._log_likelihood_gradients(self)
-        alpha = np.dot(self.Ki, self.Y.flatten())
+        ll_grads = GP._log_likelihood_gradients(self)
+        alpha = np.dot(self.Ki, self.likelihood.Y.flatten())
         warping_grads = self.warping_function_gradients(alpha)
+        warping_grads = np.append(warping_grads[:,:-1].flatten(), warping_grads[0,-1])
         return np.hstack((warping_grads.flatten(), ll_grads.flatten()))
     def warping_function_gradients(self, Kiy):
-        grad_y = self.warping_function.fgrad_y(self.Z, self.warping_params)
-        grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Z, self.warping_params,
+        grad_y = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params)
+        grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, self.warping_params,
                                                                  return_covar_chain = True)
         djac_dpsi = ((1.0/grad_y[:,:, None, None])*grad_y_psi).sum(axis=0).sum(axis=0)
         dquad_dpsi = (Kiy[:,None,None,None] * grad_psi).sum(axis=0).sum(axis=0)
         return -dquad_dpsi + djac_dpsi
     def plot_warping(self):
-        self.warping_function.plot(self.warping_params, self.Z.min(), self.Z.max())
+        self.warping_function.plot(self.warping_params, self.Y_untransformed.min(), self.Y_untransformed.max())
-    def predict(self, X, in_unwarped_space = False, **kwargs):
-        mu, var = GP_regression.predict(self, X, **kwargs)
+    def _raw_predict(self, *args, **kwargs):
+        mu, var = GP._raw_predict(self, *args, **kwargs)
         # The plot() function calls _set_params() before calling predict();
         # this is causing the observations to be plotted in the transformed
         # space (where Y lives), making the plot look very wrong
         # if the predictions are made in the untransformed space
         # (where Z lives). To fix this I included the option below. It's
         # just a quick fix until I figure out something smarter.
-        if in_unwarped_space:
+        if self.predict_in_warped_space:
             mu = self.warping_function.f_inv(mu, self.warping_params)
             var = self.warping_function.f_inv(var, self.warping_params)
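The essential change in the model above is the change of variables in log_likelihood: the standard GP marginal likelihood is evaluated on the warped targets f(y; psi), and the log-Jacobian of the warping, sum_n log f'(y_n), is added. A minimal standalone numpy sketch of that computation (the function names and the toy noise term below are illustrative, not taken from GPy):

import numpy as np

def warp(y, a, b, c, d):
    # f(y) = d*y + sum_i a_i*tanh(b_i*(y + c_i)); y is (N, 1), a, b, c are (n_terms,)
    return d*y + (a*np.tanh(b*(y + c))).sum(axis=-1, keepdims=True)

def warp_grad(y, a, b, c, d):
    # df/dy = d + sum_i a_i*b_i / cosh(b_i*(y + c_i))**2
    return d + (a*b/np.cosh(b*(y + c))**2).sum(axis=-1, keepdims=True)

def warped_gp_loglik(K, y, a, b, c, d, noise=0.01):
    z = warp(y, a, b, c, d)                       # warped targets seen by the GP
    C = K + noise*np.eye(K.shape[0])              # covariance plus Gaussian noise
    L = np.linalg.cholesky(C)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, z))
    gp_term = (-0.5*float(z.T.dot(alpha))
               - np.log(np.diag(L)).sum()
               - 0.5*y.shape[0]*np.log(2*np.pi))
    jacobian_term = np.log(warp_grad(y, a, b, c, d)).sum()   # change-of-variables correction
    return gp_term + jacobian_term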

View file

@@ -81,7 +81,7 @@ class TanhWarpingFunction(WarpingFunction):
         iterations: number of N.R. iterations
         """
         y = y.copy()
         z = np.ones_like(y)
@@ -155,3 +155,118 @@ class TanhWarpingFunction(WarpingFunction):
         variables = ['a', 'b', 'c']
         names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
         return names
+class TanhWarpingFunction_d(WarpingFunction):
+    def __init__(self,n_terms=3):
+        """n_terms specifies the number of tanh terms to be used"""
+        self.n_terms = n_terms
+        self.num_parameters = 3 * self.n_terms + 1
+    def f(self,y,psi):
+        """transform y with f using the parameter vector psi
+        psi = [a_1, b_1, c_1, ..., a_K, b_K, c_K, d]
+        f(y) = d*y + \sum_{i=1}^{K} a_i * tanh(b_i*(y+c_i))
+        """
+        #1. check that number of params is consistent
+        # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'
+        # assert psi.shape[1] == 4, 'inconsistent parameter dimensions'
+        mpsi = psi.copy()
+        d = psi[-1]
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+        #3. transform data
+        z = d*y.copy()
+        for i in range(len(mpsi)):
+            a,b,c = mpsi[i]
+            z += a*np.tanh(b*(y+c))
+        return z
+    def f_inv(self, y, psi, iterations = 30):
+        """
+        calculate the numerical inverse of f
+        == input ==
+        iterations: number of Newton-Raphson iterations
+        """
+        y = y.copy()
+        z = np.ones_like(y)
+        for i in range(iterations):
+            z -= (self.f(z, psi) - y)/self.fgrad_y(z,psi)
+        return z
+    def fgrad_y(self, y, psi, return_precalc = False):
+        """
+        gradient of f w.r.t. y ([N x 1])
+        returns: Nx1 vector of derivatives, unless return_precalc is true,
+        then it also returns the precomputed stuff
+        """
+        mpsi = psi.copy()
+        d = psi[-1]
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+        # vectorized version
+        S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T
+        R = np.tanh(S)
+        D = 1-R**2
+        GRAD = (d + (mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T
+        if return_precalc:
+            return GRAD, S, R, D
+        return GRAD
+    def fgrad_y_psi(self, y, psi, return_covar_chain = False):
+        """
+        gradient of f w.r.t. y and psi
+        returns: an N x D x num_terms x 4 tensor of partial derivatives
+        """
+        mpsi = psi.copy()
+        mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
+        w, s, r, d = self.fgrad_y(y, psi, return_precalc = True)
+        gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
+        for i in range(len(mpsi)):
+            a,b,c = mpsi[i]
+            gradients[:,:,i,0] = (b*(1.0/np.cosh(s[i]))**2).T
+            gradients[:,:,i,1] = a*(d[i] - 2.0*s[i]*r[i]*(1.0/np.cosh(s[i]))**2).T
+            gradients[:,:,i,2] = (-2.0*a*(b**2)*r[i]*((1.0/np.cosh(s[i]))**2)).T
+            gradients[:,:,0,3] = 1.0
+        if return_covar_chain:
+            covar_grad_chain = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
+            for i in range(len(mpsi)):
+                a,b,c = mpsi[i]
+                covar_grad_chain[:, :, i, 0] = (r[i]).T
+                covar_grad_chain[:, :, i, 1] = (a*(y + c) * ((1.0/np.cosh(s[i]))**2).T)
+                covar_grad_chain[:, :, i, 2] = a*b*((1.0/np.cosh(s[i]))**2).T
+                covar_grad_chain[:, :, 0, 3] = y
+            return gradients, covar_grad_chain
+        return gradients
+    def _get_param_names(self):
+        variables = ['a', 'b', 'c', 'd']
+        names = sum([['warp_tanh_%s_t%i' % (variables[n],q) for n in range(3)] for q in range(self.n_terms)],[])
+        names.append('warp_tanh_d')
+        return names
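A quick sanity sketch for the new TanhWarpingFunction_d: it checks that f_inv numerically inverts f via the Newton-Raphson loop above, and that fgrad_y agrees with a finite-difference derivative. It assumes GPy is importable and that the class is exposed as GPy.util.warping_functions.TanhWarpingFunction_d, as the imports in the model file suggest:

import numpy as np
from GPy.util.warping_functions import TanhWarpingFunction_d

np.random.seed(0)
warp = TanhWarpingFunction_d(n_terms=2)
psi = np.random.randn(warp.num_parameters)*0.1   # small tanh terms keep f monotonic
psi[-1] = 1.0                                    # positive linear term d
y = np.random.randn(20, 1)
z = warp.f(y, psi)                               # forward warp
y_rec = warp.f_inv(z, psi)                       # Newton-Raphson inverse
assert np.abs(y - y_rec).max() < 1e-6            # inverse should be near exact after 30 iterations
eps = 1e-6                                       # central-difference check of fgrad_y
fd = (warp.f(y + eps, psi) - warp.f(y - eps, psi))/(2*eps)
assert np.abs(fd - warp.fgrad_y(y, psi)).max() < 1e-4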