merged. ish.

This commit is contained in:
James Hensman 2015-03-23 16:26:31 +00:00
commit b48d58fb1f
7 changed files with 136 additions and 86 deletions

View file

@@ -6,6 +6,20 @@ from kern import CombinationKernel
from ...util.caching import Cache_this
import itertools
def numpy_invalid_op_as_exception(func):
"""
A decorator that allows catching numpy invalid operations
as exceptions (the default behaviour is raising warnings).
"""
def func_wrapper(*args, **kwargs):
    old_settings = np.seterr(invalid='raise')
    try:
        return func(*args, **kwargs)
    finally:
        # restore the previous error state even if func raises
        np.seterr(**old_settings)
return func_wrapper
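A minimal illustration of the decorator in use (the function name below is hypothetical): with invalid='raise' active, a 0./0. in a numpy array surfaces as a FloatingPointError rather than a RuntimeWarning.

@numpy_invalid_op_as_exception
def elementwise_ratio(a, b):
    # 0./0. is an IEEE 'invalid' operation
    return a / b

try:
    elementwise_ratio(np.zeros(3), np.zeros(3))
except FloatingPointError:
    print "caught an invalid numpy operation"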
class Prod(CombinationKernel):
"""
Computes the product of two or more kernels
@@ -46,28 +60,30 @@ class Prod(CombinationKernel):
self.parts[0].update_gradients_full(dL_dK*self.parts[1].K(X,X2), X, X2)
self.parts[1].update_gradients_full(dL_dK*self.parts[0].K(X,X2), X, X2)
else:
k = self.K(X,X2)*dL_dK
for p in self.parts:
p.update_gradients_full(k/p.K(X,X2),X,X2)
for combination in itertools.combinations(self.parts, len(self.parts) - 1):
prod = reduce(np.multiply, [p.K(X, X2) for p in combination])
to_update = list(set(self.parts) - set(combination))[0]
to_update.update_gradients_full(dL_dK * prod, X, X2)
def update_gradients_diag(self, dL_dKdiag, X):
if len(self.parts)==2:
self.parts[0].update_gradients_diag(dL_dKdiag*self.parts[1].Kdiag(X), X)
self.parts[1].update_gradients_diag(dL_dKdiag*self.parts[0].Kdiag(X), X)
else:
k = self.Kdiag(X)*dL_dKdiag
for p in self.parts:
p.update_gradients_diag(k/p.Kdiag(X),X)
for combination in itertools.combinations(self.parts, len(self.parts) - 1):
prod = reduce(np.multiply, [p.Kdiag(X) for p in combination])
to_update = list(set(self.parts) - set(combination))[0]
to_update.update_gradients_diag(dL_dKdiag * prod, X)
def gradients_X(self, dL_dK, X, X2=None):
target = np.zeros(X.shape)
if len(self.parts)==2:
target += self.parts[0].gradients_X(dL_dK*self.parts[1].K(X, X2), X, X2)
target += self.parts[1].gradients_X(dL_dK*self.parts[0].K(X, X2), X, X2)
else:
k = self.K(X,X2)*dL_dK
for p in self.parts:
target += p.gradients_X(k/p.K(X,X2),X,X2)
for combination in itertools.combinations(self.parts, len(self.parts) - 1):
prod = reduce(np.multiply, [p.K(X, X2) for p in combination])
to_update = list(set(self.parts) - set(combination))[0]
target += to_update.gradients_X(dL_dK * prod, X, X2)
return target
def gradients_X_diag(self, dL_dKdiag, X):
@@ -80,3 +96,5 @@ class Prod(CombinationKernel):
for p in self.parts:
target += p.gradients_X_diag(k/p.Kdiag(X),X)
return target
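All three gradient methods above use the same trick: instead of dividing the full product kernel by each factor (which yields NaN wherever that factor's K is exactly zero, the case exercised by the new tests below), the weight for each part is rebuilt as the product of the remaining parts. A standalone sketch of the leave-one-out product, with hypothetical stand-in matrices:

import itertools
import numpy as np

Ks = [np.ones((3, 3)), 2 * np.ones((3, 3)), np.zeros((3, 3))]   # one factor is all zeros
full = reduce(np.multiply, Ks)

for combination in itertools.combinations(range(len(Ks)), len(Ks) - 1):
    left_out = (set(range(len(Ks))) - set(combination)).pop()
    prod = reduce(np.multiply, [Ks[i] for i in combination])
    # prod is the gradient weight for part `left_out`; it stays finite,
    # whereas full / Ks[left_out] is NaN wherever Ks[left_out] == 0
    assert np.all(np.isfinite(prod))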

View file

@@ -43,10 +43,11 @@ class SparseGPMiniBatch(SparseGP):
def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None,
name='sparse gp', Y_metadata=None, normalizer=False,
missing_data=False, stochastic=False, batchsize=1):
#pick a sensible inference method
# pick a sensible inference method
if inference_method is None:
if isinstance(likelihood, likelihoods.Gaussian):
inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1])
inference_method = var_dtc.VarDTC(limit=1 if not missing_data else Y.shape[1])
else:
    # no default exists for this likelihood; require an explicit choice
    raise NotImplementedError, "no default inference method for this likelihood; please specify one explicitly"

View file

@@ -1,7 +1,6 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..util.warping_functions import *
from ..core import GP
@@ -10,14 +9,16 @@ from GPy.util.warping_functions import TanhWarpingFunction_d
from GPy import kern
class WarpedGP(GP):
def __init__(self, X, Y, kernel=None, warping_function=None, warping_terms=3, normalize_X=False, normalize_Y=False):
def __init__(self, X, Y, kernel=None, warping_function=None, warping_terms=3):
if kernel is None:
kernel = kern.rbf(X.shape[1])
kernel = kern.RBF(X.shape[1])
if warping_function is None:
self.warping_function = TanhWarpingFunction_d(warping_terms)
self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1,) * 1)
else:
self.warping_function = warping_function
self.scale_data = False
if self.scale_data:
@@ -25,10 +26,10 @@ class WarpedGP(GP):
self.has_uncertain_inputs = False
self.Y_untransformed = Y.copy()
self.predict_in_warped_space = False
likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y)
likelihood = likelihoods.Gaussian()
GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
self._set_params(self._get_params())
GP.__init__(self, X, self.transform_data(), likelihood=likelihood, kernel=kernel)
self.link_parameter(self.warping_function)
def _scale_data(self, Y):
self._Ymax = Y.max()
@@ -38,62 +39,55 @@ class WarpedGP(GP):
def _unscale_data(self, Y):
return (Y + 0.5) * (self._Ymax - self._Ymin) + self._Ymin
def _set_params(self, x):
self.warping_params = x[:self.warping_function.num_parameters]
Y = self.transform_data()
self.likelihood.set_data(Y)
GP._set_params(self, x[self.warping_function.num_parameters:].copy())
def parameters_changed(self):
    self.Y[:] = self.transform_data()
    super(WarpedGP, self).parameters_changed()
    Kiy = self.posterior.woodbury_vector.flatten()
    self.warping_function_gradients(Kiy)
def _get_params(self):
    return np.hstack((self.warping_params.flatten().copy(), GP._get_params(self).copy()))
def _get_param_names(self):
warping_names = self.warping_function._get_param_names()
param_names = GP._get_param_names(self)
return warping_names + param_names
def transform_data(self):
Y = self.warping_function.f(self.Y_untransformed.copy(), self.warping_params).copy()
return Y
def log_likelihood(self):
ll = GP.log_likelihood(self)
jacobian = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params)
return ll + np.log(jacobian).sum()
def _log_likelihood_gradients(self):
ll_grads = GP._log_likelihood_gradients(self)
alpha = np.dot(self.Ki, self.likelihood.Y.flatten())
warping_grads = self.warping_function_gradients(alpha)
warping_grads = np.append(warping_grads[:, :-1].flatten(), warping_grads[0, -1])
return np.hstack((warping_grads.flatten(), ll_grads.flatten()))
def warping_function_gradients(self, Kiy):
grad_y = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params)
grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, self.warping_params,
grad_y = self.warping_function.fgrad_y(self.Y_untransformed)
grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed,
return_covar_chain=True)
djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0)
dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0)
return -dquad_dpsi + djac_dpsi
warping_grads = -dquad_dpsi + djac_dpsi
self.warping_function.psi.gradient[:] = warping_grads[:, :-1]
self.warping_function.d.gradient[:] = warping_grads[0, -1]
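For reference, the gradient assembled here follows from differentiating the warped log marginal likelihood: with $z = f(y)$ and $\alpha = K^{-1}z$ (the Kiy above, taken from the posterior's woodbury_vector),
$$\frac{\partial L}{\partial \psi} = -\sum_i \alpha_i \frac{\partial f(y_i)}{\partial \psi} + \sum_i \frac{1}{f'(y_i)} \frac{\partial f'(y_i)}{\partial \psi},$$
the first term coming from the quadratic form (dquad_dpsi) and the second from the log-Jacobian (djac_dpsi).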
def transform_data(self):
Y = self.warping_function.f(self.Y_untransformed.copy()).copy()
return Y
def log_likelihood(self):
ll = GP.log_likelihood(self)
jacobian = self.warping_function.fgrad_y(self.Y_untransformed)
return ll + np.log(jacobian).sum()
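The Jacobian correction is the usual change-of-variables term: modelling $z = f(y)$ with a GP gives
$$\log p(y \mid X) = \log \mathcal{N}\big(f(y) \mid 0, K\big) + \sum_i \log f'(y_i),$$
so only the log of fgrad_y enters the objective.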
def plot_warping(self):
self.warping_function.plot(self.warping_params, self.Y_untransformed.min(), self.Y_untransformed.max())
self.warping_function.plot(self.Y_untransformed.min(), self.Y_untransformed.max())
def predict(self, Xnew, which_parts='all', full_cov=False, pred_init=None):
def predict(self, Xnew, which_parts='all', pred_init=None):
# normalize X values
Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale
mu, var = GP._raw_predict(self, Xnew, full_cov=full_cov, which_parts=which_parts)
# Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale
mu, var = GP._raw_predict(self, Xnew)
# now push through likelihood
mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
mean, var = self.likelihood.predictive_values(mu, var)
if self.predict_in_warped_space:
mean = self.warping_function.f_inv(mean, self.warping_params, y=pred_init)
var = self.warping_function.f_inv(var, self.warping_params)
mean = self.warping_function.f_inv(mean, y=pred_init)
var = self.warping_function.f_inv(var)
if self.scale_data:
mean = self._unscale_data(mean)
return mean, var, _025pm, _975pm
return mean, var
if __name__ == '__main__':
X = np.random.randn(100, 1)
Y = np.sin(X) + np.random.randn(100, 1)*0.05
m = WarpedGP(X, Y)

View file

@@ -6,7 +6,11 @@ try:
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
#from matplotlib import cm
pb.ion()
try:
__IPYTHON__
pb.ion()
except NameError:
pass
except:
pass
import re

View file

@@ -401,11 +401,27 @@ class Coregionalize_weave_test(unittest.TestCase):
GPy.util.config.config.set('weave', 'working', 'False')
class KernelTestsProductWithZeroValues(unittest.TestCase):
def setUp(self):
self.X = np.array([[0,1],[1,0]])
self.k = GPy.kern.Linear(2) * GPy.kern.Bias(2)
def test_zero_valued_kernel_full(self):
self.k.update_gradients_full(1, self.X)
self.assertFalse(np.isnan(self.k['linear.variances'].gradient),
"Gradient resulted in NaN")
def test_zero_valued_kernel_gradients_X(self):
target = self.k.gradients_X(1, self.X)
self.assertFalse(np.any(np.isnan(target)),
"Gradient resulted in NaN")
if __name__ == "__main__":
print "Running unit tests, please be (very) patient..."
unittest.main()
# np.random.seed(0)
# N0 = 3
# N1 = 9

View file

@@ -96,16 +96,21 @@ def jitchol(A, maxtries=5):
num_tries = 1
while num_tries <= maxtries and np.isfinite(jitter):
try:
    L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
    import traceback
    logging.warning('\n'.join(['Added {} rounds of jitter, jitter of {:.10e}'.format(num_tries, jitter),
                               '  in ' + traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]]))
    return L
except:
    jitter *= 10
finally:
    num_tries += 1
raise linalg.LinAlgError, "not positive definite, even with jitter."
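A quick illustration of the escalating-jitter strategy (inputs are hypothetical): a rank-deficient Gram matrix defeats a plain Cholesky, but factorizes once a small diagonal jitter is added, escalating tenfold per retry.

import numpy as np

X = np.random.randn(50, 5)
A = np.dot(X, X.T)        # symmetric PSD but rank 5, so numerically not PD
# np.linalg.cholesky(A) would typically raise LinAlgError here
L = jitchol(A)            # succeeds after one or more rounds of jitter
err = np.abs(np.dot(L, L.T) - A).max()   # small, on the order of the jitter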
# def dtrtri(L, lower=1):
# """

View file

@@ -1,17 +1,18 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from GPy.core.parameterization import Parameterized, Param
from ..core.parameterization.transformations import Logexp
class WarpingFunction(object):
class WarpingFunction(Parameterized):
"""
abstract function for warping
z = f(y)
"""
def __init__(self):
raise NotImplementedError
def __init__(self, name):
super(WarpingFunction, self).__init__(name=name)
def f(self,y,psi):
"""function transformation
@@ -34,9 +35,10 @@ class WarpingFunction(object):
def _get_param_names(self):
raise NotImplementedError
def plot(self, psi, xmin, xmax):
def plot(self, xmin, xmax):
psi = self.psi
y = np.arange(xmin, xmax, 0.01)
f_y = self.f(y, psi)
f_y = self.f(y)
from matplotlib import pyplot as plt
plt.figure()
plt.plot(y, f_y)
@@ -50,6 +52,7 @@ class TanhWarpingFunction(WarpingFunction):
"""n_terms specifies the number of tanh terms to be used"""
self.n_terms = n_terms
self.num_parameters = 3 * self.n_terms
super(TanhWarpingFunction, self).__init__(name='warp_tanh')
def f(self,y,psi):
"""
@@ -163,8 +166,18 @@ class TanhWarpingFunction_d(WarpingFunction):
"""n_terms specifies the number of tanh terms to be used"""
self.n_terms = n_terms
self.num_parameters = 3 * self.n_terms + 1
self.psi = np.ones((self.n_terms, 3))
def f(self,y,psi):
super(TanhWarpingFunction_d, self).__init__(name='warp_tanh')
self.psi = Param('psi', self.psi)
self.psi[:, :2].constrain_positive()
self.d = Param('d', 1.0, Logexp())
self.link_parameter(self.psi)
self.link_parameter(self.d)
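The idiom above is GPy's parameterization pattern: wrap raw arrays in Param, apply constraints, then link_parameter so the parameters appear in the model hierarchy and receive gradients. A minimal sketch of the same pattern on a toy container (class name hypothetical):

import numpy as np
from GPy.core.parameterization import Parameterized, Param
from GPy.core.parameterization.transformations import Logexp

class ToyWarp(Parameterized):
    def __init__(self):
        super(ToyWarp, self).__init__(name='toy_warp')
        self.psi = Param('psi', np.ones((2, 3)))
        self.psi[:, :2].constrain_positive()     # keep the a, b terms positive
        self.d = Param('d', 1.0, Logexp())       # positivity via the Logexp transform
        self.link_parameter(self.psi)
        self.link_parameter(self.d)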
def f(self, y):
"""
Transform y with f using parameter vector psi
psi = [[a,b,c]]
@@ -175,9 +188,9 @@ class TanhWarpingFunction_d(WarpingFunction):
#1. check that number of params is consistent
# assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'
# assert psi.shape[1] == 4, 'inconsistent parameter dimensions'
mpsi = psi.copy()
d = psi[-1]
mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
d = self.d
mpsi = self.psi
#3. transform data
z = d*y.copy()
@@ -187,7 +200,7 @@ class TanhWarpingFunction_d(WarpingFunction):
return z
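Written out, the warp implemented by this class is
$$f(y) = d\,y + \sum_j a_j \tanh\big(b_j (y + c_j)\big),$$
with $(a_j, b_j, c_j)$ the rows of psi and $d$ the linear slope; the positivity constraints on $a_j$, $b_j$ and $d$ make $f$ strictly monotonic, so the numerical inverse below is well defined.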
def f_inv(self, z, psi, max_iterations=1000, y=None):
def f_inv(self, z, max_iterations=1000, y=None):
"""
calculate the numerical inverse of f
@@ -198,12 +211,12 @@ class TanhWarpingFunction_d(WarpingFunction):
z = z.copy()
if y is None:
y = np.ones_like(z)
it = 0
update = np.inf
while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations):
update = (self.f(y, psi) - z)/self.fgrad_y(y, psi)
update = (self.f(y) - z)/self.fgrad_y(y)
y -= update
it += 1
if it == max_iterations:
@@ -212,7 +225,7 @@ class TanhWarpingFunction_d(WarpingFunction):
return y
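The loop above is a vectorized Newton iteration for solving $f(y) = z$: each step applies $y \leftarrow y - (f(y) - z)/f'(y)$, which is well behaved here because the constrained warp has $f'(y) > 0$ everywhere.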
def fgrad_y(self, y, psi, return_precalc = False):
def fgrad_y(self, y, return_precalc=False):
"""
gradient of f w.r.t to y ([N x 1])
@@ -221,9 +234,8 @@ class TanhWarpingFunction_d(WarpingFunction):
"""
mpsi = psi.copy()
d = psi[-1]
mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
d = self.d
mpsi = self.psi
# vectorized version
@@ -240,7 +252,7 @@ class TanhWarpingFunction_d(WarpingFunction):
return GRAD
def fgrad_y_psi(self, y, psi, return_covar_chain = False):
def fgrad_y_psi(self, y, return_covar_chain=False):
"""
gradient of f w.r.t to y and psi
@@ -248,10 +260,10 @@ class TanhWarpingFunction_d(WarpingFunction):
"""
mpsi = psi.copy()
mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)
w, s, r, d = self.fgrad_y(y, psi, return_precalc = True)
mpsi = self.psi
w, s, r, d = self.fgrad_y(y, return_precalc=True)
gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4))
for i in range(len(mpsi)):