Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-13 14:03:20 +02:00

Commit c4cd93cd17 (MERGE)
11 changed files with 38 additions and 34 deletions
@@ -29,7 +29,7 @@ class GP(GPBase):
     """

     def __init__(self, X, likelihood, kernel, normalize_X=False):
-        super(GP, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
         self._set_params(self._get_params())

     def _set_params(self, p):
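Note: the diff does not state why the super() calls are replaced with explicit base-class calls here and in the hunks below. One plausible motivation, offered only as an assumption: in Python 2, which this codebase targets given the `raise E, msg` syntax, super() works only for new-style classes, while explicit base calls work for both; several of the old calls were also passing `self` twice (see the GPLVM hunk further down). A minimal sketch of the old-style-class pitfall, with hypothetical class names:

    # Python 2: super() requires new-style classes (ultimately inheriting
    # from object); an explicit base call works for either style.
    class OldStyle:                          # old-style: no object base
        def __init__(self):
            self.ready = True

    class Child(OldStyle):
        def __init__(self):
            # super(Child, self).__init__()  # TypeError: super() argument 1
            #                                # must be type, not classobj
            OldStyle.__init__(self)          # works either way
            self.extra = 1

    c = Child()
    print c.ready, c.extra                   # True 1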
@@ -53,6 +53,10 @@ class GP(GPBase):
         tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
         self.dL_dK = 0.5 * (tmp - self.D * self.Ki)

+    def _get_params(self):
+        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
+
+
     def _get_param_names(self):
         return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()

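The added GP._get_params makes the parameter-vector convention explicit: each component contributes a flat array, and the model concatenates them in a fixed order. A small illustrative sketch (values are made up):

    import numpy as np

    kern_params = np.array([1.0, 0.5])       # e.g. variance, lengthscale
    likelihood_params = np.array([0.1])      # e.g. noise variance

    # Same np.hstack pattern as the added _get_params
    theta = np.hstack((kern_params, likelihood_params))
    print theta                              # [ 1.   0.5  0.1]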
@@ -28,14 +28,11 @@ class GPBase(model.model):
         self._Xmean = np.zeros((1,self.Q))
         self._Xstd = np.ones((1,self.Q))

-        super(GPBase, self).__init__()
+        model.model.__init__(self)

         # All leaf nodes should call self._set_params(self._get_params()) at
         # the end

-    def _get_params(self):
-        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
-
     def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False):
         """
         Plot the GP's view of the world, where the data is normalized and the
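With _get_params deleted from GPBase, the comment about leaf nodes carries the whole contract: the base class only sets up shared state, and each concrete subclass must finish its __init__ by calling self._set_params(self._get_params()). A sketch of that contract with hypothetical classes:

    import numpy as np

    class Base(object):
        def __init__(self):
            self.scale = 1.0                 # shared state only

    class Leaf(Base):
        def __init__(self):
            Base.__init__(self)
            self._set_params(self._get_params())  # leaf-node convention

        def _get_params(self):
            return np.array([self.scale])

        def _set_params(self, p):
            self.scale = p[0]                # recompute caches here

    m = Leaf()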
@@ -77,8 +74,6 @@ class GPBase(model.model):
             ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None])))
             ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
             pb.ylim(ymin, ymax)
-            if hasattr(self, 'Z'):
-                pb.plot(self.Z, self.Z * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)

         elif self.X.shape[1] == 2:
             resolution = resolution or 50
@@ -34,13 +34,13 @@ class model(parameterised):
     def _log_likelihood_gradients(self):
         raise NotImplementedError, "this needs to be implemented to use the model class"

-    def set_prior(self, which, what):
+    def set_prior(self, regexp, what):
        """
        Sets priors on the model parameters.

        Arguments
        ---------
-        which -- string, regexp, or integer array
+        regexp -- string, regexp, or integer array
        what -- instance of a Prior class

        Notes
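The rename from `which` to `regexp` better reflects how the argument is used: it is handed to grep_param_names, which matches it against parameter names. A usage sketch of that matching style, using plain `re` with made-up names:

    import re

    param_names = ['rbf_variance', 'rbf_lengthscale', 'noise_variance']
    regexp = 'variance'

    matches = [i for i, n in enumerate(param_names) if re.search(regexp, n)]
    print matches                            # [0, 2]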
@@ -52,6 +52,8 @@ class model(parameterised):
        For tied parameters, the Prior will only be "counted" once, thus
        a Prior object is only inserted on the first tied index
        """
+        if self.priors is None:
+            self.priors = [None for i in range(self._get_params().size)]

        which = self.grep_param_names(which)

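This hunk switches self.priors to lazy initialisation: nothing is allocated until the first set_prior call, so the None/non-None state of self.priors now encodes "no priors anywhere" for the guards added below. A toy sketch of the pattern:

    class Toy(object):
        def __init__(self):
            self.priors = None               # nothing allocated up front

        def set_prior(self, index, prior, n_params=3):
            if self.priors is None:
                self.priors = [None for i in range(n_params)]
            self.priors[index] = prior

    t = Toy()
    t.set_prior(1, 'Gamma(1,1)')             # stand-in for a Prior instance
    print t.priors                           # [None, 'Gamma(1,1)', None]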
@@ -105,11 +107,16 @@ class model(parameterised):
             raise AttributeError, "no parameter matches %s" % name

     def log_prior(self):
-        """evaluate the Prior"""
-        return np.sum([p.lnpdf(x) for p, x in zip(self.priors, self._get_params()) if p is not None])
+        """evaluate the prior"""
+        if self.priors is not None:
+            return np.sum([p.lnpdf(x) for p, x in zip(self.priors, self._get_params()) if p is not None])
+        else:
+            return 0.

     def _log_prior_gradients(self):
         """evaluate the gradients of the priors"""
+        if self.priors is None:
+            return 0.
         x = self._get_params()
         ret = np.zeros(x.size)
         [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]
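After this change, log_prior and _log_prior_gradients return 0 for models without priors instead of iterating over (or crashing on) a None list, and parameters whose prior slot is None are still skipped. An illustrative sketch of those semantics, with a stand-in for the Prior interface:

    import numpy as np

    class ToyPrior(object):
        def lnpdf(self, x):
            return -0.5 * x ** 2             # stand-in log-density

    priors = [ToyPrior(), None]              # prior on parameter 0 only
    params = np.array([2.0, 7.0])

    log_prior = np.sum([p.lnpdf(x) for p, x in zip(priors, params)
                        if p is not None])
    print log_prior                          # -2.0; parameter 1 is ignored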
@@ -137,9 +144,10 @@ class model(parameterised):
         self._set_params_transformed(x)
         # now draw from Prior where possible
         x = self._get_params()
-        [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
+        if self.priors is not None:
+            [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
         self._set_params(x)
-        self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one Prior object...)
+        self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)


     def optimize_restarts(self, Nrestarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
@@ -236,16 +244,13 @@ class model(parameterised):
         Gets the gradients from the likelihood and the priors.
         """
         self._set_params_transformed(x)
-        LL_gradients = self._transform_gradients(self._log_likelihood_gradients())
-        prior_gradients = self._transform_gradients(self._log_prior_gradients())
-        return -LL_gradients - prior_gradients
+        obj_grads = - self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
+        return obj_grads

     def objective_and_gradients(self, x):
         self._set_params_transformed(x)
         obj_f = -self.log_likelihood() - self.log_prior()
-        LL_gradients = self._transform_gradients(self._log_likelihood_gradients())
-        prior_gradients = self._transform_gradients(self._log_prior_gradients())
-        obj_grads = -LL_gradients - prior_gradients
+        obj_grads = - self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
         return obj_f, obj_grads

     def optimize(self, optimizer=None, start=None, **kwargs):
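Folding the two gradient calls into one is safe if _transform_gradients applies per-parameter chain-rule factors, which is a linear operation on the gradient vector, so -T(g_lik) - T(g_prior) equals -T(g_lik + g_prior). A numerical sketch with an illustrative stand-in transform (the real transform is an assumption about GPy internals):

    import numpy as np

    def transform_gradients(g, params):
        return g * params                    # elementwise chain-rule factor

    params = np.array([1.5, 0.2, 3.0])
    g_lik = np.array([0.4, -1.0, 2.0])
    g_prior = np.array([0.1, 0.0, -0.5])

    old = -transform_gradients(g_lik, params) - transform_gradients(g_prior, params)
    new = -transform_gradients(g_lik + g_prior, params)
    print np.allclose(old, new)              # True, by linearity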
@@ -4,7 +4,6 @@
 import numpy as np
 import pylab as pb
 from ..util.linalg import mdot, jitchol, tdot, symmetrify, backsub_both_sides, chol_inv
-from GP import GP
 from scipy import linalg
 from ..likelihoods import Gaussian
 from gp_base import GPBase
@@ -30,7 +29,7 @@ class sparse_GP(GPBase):
     """

     def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
-        super(sparse_GP, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)

         self.Z = Z
         self.M = Z.shape[0]
@@ -160,10 +159,11 @@ class sparse_GP(GPBase):
         self._computations()

     def _get_params(self):
-        return np.hstack([self.Z.flatten(), GP._get_params(self)])
+        return np.hstack([self.Z.flatten(), self.kern._get_params_transformed(), self.likelihood._get_params()])

     def _get_param_names(self):
-        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])], []) + GP._get_param_names(self)
+        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])],[])\
+            + self.kern._get_param_names_transformed() + self.likelihood._get_param_names()

     def update_likelihood_approximation(self):
         """
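Since the `from GP import GP` import was dropped above, sparse_GP can no longer delegate to GP._get_params; this hunk inlines the same concatenation, with the inducing inputs Z flattened in front. A sketch of the resulting parameter layout (illustrative values):

    import numpy as np

    Z = np.array([[0.1], [0.9]])             # M=2 inducing inputs, 1-D
    kern_params = np.array([1.0, 0.5])
    lik_params = np.array([0.1])

    # iip_* entries come first, then kernel, then likelihood parameters
    theta = np.hstack([Z.flatten(), kern_params, lik_params])
    print theta.size                         # 5 parameters in total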
@@ -282,7 +282,7 @@ class sparse_GP(GPBase):
         return mean, var, _025pm, _975pm

     def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20):
-        super(sparse_GP, self).plot(samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20)
+        GPBase.plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20)
         if self.X.shape[1] == 1:
             Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now
             if self.has_uncertain_inputs:
@@ -26,13 +26,13 @@ class GPLVM(GP):
     :type init: 'PCA'|'random'

     """
-    def __init__(self, Y, Q, init='PCA', X = None, kernel=None, normalize_Y=False, **kwargs):
+    def __init__(self, Y, Q, init='PCA', X = None, kernel=None, normalize_Y=False):
         if X is None:
             X = self.initialise_latent(init, Q, Y)
         if kernel is None:
             kernel = kern.rbf(Q, ARD=Q>1) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
         likelihood = Gaussian(Y, normalize=normalize_Y)
-        super(GPLVM, self).__init__(self, X, likelihood, kernel, **kwargs)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=False)
         self._set_params(self._get_params())

     def initialise_latent(self, init, Q, Y):
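The old GPLVM line had a real bug beyond style: super(GPLVM, self).__init__(self, X, ...) passes self twice, because the bound super call already supplies it, so every positional argument shifts one slot. The same pattern is fixed in GP_regression, sparse_GP_regression, warpedGP and generalized_FITC below. A minimal demonstration:

    class Base(object):
        def __init__(self, x, name='base'):
            self.x, self.name = x, name

    class Broken(Base):
        def __init__(self):
            # x ends up bound to the instance, name to 42: silently wrong
            super(Broken, self).__init__(self, 42)

    class Fixed(Base):
        def __init__(self):
            Base.__init__(self, 42)          # x == 42, as intended

    print Fixed().x                          # 42
    print isinstance(Broken().x, Broken)     # True: the shifted argument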
@@ -31,5 +31,5 @@ class GP_regression(GP):

         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)

-        super(GP_regression, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
         self._set_params(self._get_params())
@@ -40,7 +40,7 @@ class generalized_FITC(sparse_GP):
         self.M = self.Z.shape[0]
         self.true_precision = likelihood.precision

-        super(generalized_FITC, self).__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False)
+        super(generalized_FITC, self).__init__(X, likelihood, kernel=kernel, Z=self.Z, X_variance=X_variance, normalize_X=normalize_X)
         self._set_params(self._get_params())

     def _set_params(self, p):
@@ -23,9 +23,9 @@ class sparse_GPLVM(sparse_GP_regression, GPLVM):
     :type init: 'PCA'|'random'

     """
-    def __init__(self, Y, Q, init='PCA', **kwargs):
+    def __init__(self, Y, Q, kernel=None, init='PCA', M=10):
         X = self.initialise_latent(init, Q, Y)
-        sparse_GP_regression.__init__(self, X, Y, **kwargs)
+        sparse_GP_regression.__init__(self, X, Y, kernel=kernel,M=M)

     def _get_param_names(self):
         return (sum([['X_%i_%i'%(n,q) for q in range(self.Q)] for n in range(self.N)],[])
@@ -43,5 +43,5 @@ class sparse_GP_regression(sparse_GP):
         #likelihood defaults to Gaussian
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)

-        super(sparse_GP_regression, self).__init__(self, X, likelihood, kernel, Z, normalize_X=normalize_X)
+        sparse_GP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
         self._set_params(self._get_params())
@@ -29,7 +29,7 @@ class warpedGP(GP):
         self.predict_in_warped_space = False
         likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y)

-        super(warpedGP, self).__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
         self._set_params(self._get_params())

     def _scale_data(self, Y):
@@ -171,7 +171,7 @@ class GradientTests(unittest.TestCase):
         kernel = GPy.kern.rbf(1)
         distribution = GPy.likelihoods.likelihood_functions.probit()
         likelihood = GPy.likelihoods.EP(Y, distribution)
-        m = GPy.models.GP(X, likelihood, kernel)
+        m = GPy.core.GP(X, likelihood, kernel)
         m.ensure_default_constraints()
         m.update_likelihood_approximation()
         self.assertTrue(m.checkgrad())
@@ -185,7 +185,7 @@ class GradientTests(unittest.TestCase):
         kernel = GPy.kern.rbf(1)
         distribution = GPy.likelihoods.likelihood_functions.probit()
         likelihood = GPy.likelihoods.EP(Y, distribution)
-        m = GPy.models.sparse_GP(X, likelihood, kernel,Z)
+        m = GPy.core.sparse_GP(X, likelihood, kernel,Z)
         m.ensure_default_constraints()
         m.update_likelihood_approximation()
         self.assertTrue(m.checkgrad())
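Both test fixes track the same reorganisation: the generic GP and sparse_GP classes now live under GPy.core rather than GPy.models. A usage sketch mirroring the updated test, assuming a GPy checkout with this commit on the path (shapes and data are illustrative):

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)
    Y = np.where(np.random.rand(20, 1) > 0.5, 1, -1)  # binary labels

    kernel = GPy.kern.rbf(1)
    distribution = GPy.likelihoods.likelihood_functions.probit()
    likelihood = GPy.likelihoods.EP(Y, distribution)

    m = GPy.core.GP(X, likelihood, kernel)   # was GPy.models.GP(...)
    m.ensure_default_constraints()
    m.update_likelihood_approximation()
    print m.checkgrad()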