Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-10 20:42:39 +02:00
first adjustments to the model and gps -> updates and gradient transforms
commit 9a70f27010 (parent d12f055115)
4 changed files with 43 additions and 34 deletions
File 1 of 4 (the GP class):

@@ -3,9 +3,7 @@
 
 import numpy as np
-import pylab as pb
-from .. import kern
-from ..util.linalg import pdinv, mdot, tdot, dpotrs, dtrtrs
+from ..util.linalg import pdinv, tdot, dpotrs, dtrtrs
 from ..likelihoods import EP
 from gp_base import GPBase
 
 
@@ -35,6 +33,7 @@ class GP(GPBase):
         #self._set_params(self._get_params())
 
     def parameters_changed(self):
+        super(GP, self).parameters_changed()
         self.K = self.kern.K(self.X)
         self.K += self.likelihood.covariance_matrix
         self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
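Note: the four values cached by the `pdinv` call above follow from the Cholesky factorisation of the (positive-definite) covariance. A minimal numpy/scipy sketch of what they contain — not GPy's actual implementation, which goes through its own LAPACK wrappers in `..util.linalg`:

    import numpy as np
    from scipy import linalg

    def pdinv_sketch(K):
        L = linalg.cholesky(K, lower=True)                               # K = L L^T
        Li = linalg.solve_triangular(L, np.eye(K.shape[0]), lower=True)  # L^{-1}
        Ki = Li.T.dot(Li)                                                # K^{-1} = L^{-T} L^{-1}
        K_logdet = 2.0 * np.sum(np.log(np.diag(L)))                      # log|K|
        return Ki, L, Li, K_logdet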
@@ -51,11 +50,11 @@ class GP(GPBase):
         tmp, _ = dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
         self.dL_dK = 0.5 * (tmp - self.output_dim * self.Ki)
 
-    def _get_params(self):
-        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
+    # def _get_params(self):
+    #     return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
 
-    def _get_param_names(self):
-        return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
+    # def _get_param_names(self):
+    #     return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
 
     def update_likelihood_approximation(self, **kwargs):
         """
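Note: the `dL_dK` line in this hunk is the standard gradient of a Gaussian log marginal likelihood with `output_dim` independent outputs. A hedged standalone sketch of the same quantity (`Ki` stands for K^{-1} and `Y` for the targets; the real code builds the first term with `dpotrs` solves rather than explicit inverses):

    import numpy as np

    def dL_dK_sketch(Ki, Y):
        # For log p(Y) = -0.5*D*log|K| - 0.5*tr(Y^T K^{-1} Y) + const:
        # dL/dK = 0.5 * (K^{-1} Y Y^T K^{-1} - D * K^{-1})
        alpha = Ki.dot(Y)   # K^{-1} Y
        D = Y.shape[1]      # output_dim
        return 0.5 * (alpha.dot(alpha.T) - D * Ki)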
@@ -66,7 +65,7 @@ class GP(GPBase):
         """
         self.likelihood.restart()
         self.likelihood.fit_full(self.kern.K(self.X), **kwargs)
-        self._set_params(self._get_params()) # update the GP
+        # self._set_params(self._get_params()) # update the GP
 
     def _model_fit_term(self):
         """
File 2 of 4 (the GPBase class):

@@ -2,7 +2,7 @@ import numpy as np
 from .. import kern
 from ..util.plot import gpplot, Tango, x_frame1D, x_frame2D
 import pylab as pb
-from GPy.core.model import Model
+from model import Model
 import warnings
 from ..likelihoods import Gaussian, Gaussian_Mixed_Noise
 
@@ -12,6 +12,8 @@ class GPBase(Model):
     sparse_GP and GP models.
     """
     def __init__(self, X, likelihood, kernel, normalize_X=False):
+        super(GPBase, self).__init__()
+
         self.X = X
         assert len(self.X.shape) == 2
         self.num_data, self.input_dim = self.X.shape
@@ -29,10 +31,9 @@ class GPBase(Model):
             self._Xoffset = np.zeros((1, self.input_dim))
             self._Xscale = np.ones((1, self.input_dim))
 
-        self.set_as_parameter(self.kern._parameters_)
-        self.set_as_parameter(self.likelihood._parameters_)
+        self.set_as_parameters(*self.kern._parameters_, highest_parent=self)
+        self.set_as_parameters(*self.likelihood._parameters_, highest_parent=self)
 
-        super(GPBase, self).__init__()
         # Model.__init__(self)
         # All leaf nodes should call self._set_params(self._get_params()) at
         # the end
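Note: both GPBase hunks serve the same refactor — initialise the parent first, register kernel and likelihood parameters through the new `set_as_parameters` call, and drop the late `super().__init__()`. A minimal sketch (hypothetical class names, not GPy code) of the cooperative-super pattern this enables, where a hook such as `parameters_changed` chains from leaf to base:

    class Base(object):
        def parameters_changed(self):
            pass  # base hook: subclasses extend rather than replace it

    class Leaf(Base):
        def parameters_changed(self):
            super(Leaf, self).parameters_changed()  # parents refresh first
            # ...then recompute whatever depends on the new parameter values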
File 3 of 4 (the Model class):

@@ -6,7 +6,7 @@ from .. import likelihoods
 from ..inference import optimization
 from ..util.linalg import jitchol
 from ..util.misc import opt_wrapper
-from parameterized import Parameterized
+from parameterized import Parameterized, __fixed__
 import multiprocessing as mp
 import numpy as np
 from domains import _POSITIVE, _REAL
@@ -18,7 +18,8 @@ class Model(Parameterized):
     _fail_count = 0 # Count of failed optimization steps (see objective)
     _allowed_failures = 10 # number of allowed failures
     def __init__(self):
-        Parameterized.__init__(self)
+        super(Model, self).__init__()#Parameterized.__init__(self)
+        self.priors = []
         self._priors = ParameterIndexOperations()
         self.optimization_runs = []
         self.sampling_runs = []
@@ -151,16 +152,20 @@ class Model(Parameterized):
         [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]
         return ret
 
     def _transform_gradients(self, g):
         x = self._get_params()
-        for index, constraint in zip(self.constrained_indices, self.constraints):
-            g[index] = g[index] * constraint.gradfactor(x[index])
-        [np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]
-        if len(self.tied_indices) or len(self.fixed_indices):
-            to_remove = np.hstack((self.fixed_indices + [t[1:] for t in self.tied_indices]))
-            return np.delete(g, to_remove)
-        else:
-            return g
+        for constraint, index in self._constraints_.iteritems():
+            if constraint != __fixed__:
+                g[index] = g[index] * constraint.gradfactor(x[index])
+        #[np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]
+        [np.put(g, i, v) for i, v in [[i, t.sum()] for p in self._parameters_ for t,i in p._tied_to_me_.iteritems()]]
+        # if len(self.tied_indices) or len(self.fixed_indices):
+        #     to_remove = np.hstack((self.fixed_indices + [t[1:] for t in self.tied_indices]))
+        #     return np.delete(g, to_remove)
+        # else:
+        #     return g
+        if self._fixes_ is not None: return g[self._fixes_]
+        return g
 
     def randomize(self):
         """
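Note: `gradfactor` supplies the chain-rule factor that carries a gradient from the constrained parameter space into the unconstrained space the optimiser works in, and the new `_fixes_` mask replaces the old index-deletion bookkeeping. A sketch assuming the usual log-exp positivity transform theta = log(1 + exp(phi)):

    import numpy as np

    def logexp_gradfactor(theta):
        # theta = log(1 + exp(phi))  =>  dtheta/dphi = 1 - exp(-theta)
        return 1.0 - np.exp(-theta)

    def transform_gradient(dL_dtheta, theta, fixes=None):
        g = dL_dtheta * logexp_gradfactor(theta)  # dL/dphi = dL/dtheta * dtheta/dphi
        return g if fixes is None else g[fixes]   # drop entries the user has fixed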
@@ -262,14 +267,18 @@ class Model(Parameterized):
         """
         positive_strings = ['variance', 'lengthscale', 'precision', 'kappa']
         # param_names = self._get_param_names()
-        currently_constrained = self.all_constrained_indices()
-        to_make_positive = []
         for s in positive_strings:
-            for i in self.grep_param_names(".*" + s):
-                if not (i in currently_constrained):
-                    to_make_positive.append(i)
-        if len(to_make_positive):
-            self.constrain_positive(np.asarray(to_make_positive), warning=warning)
+            paramlist = self.grep_param_names(".*"+s)
+            if paramlist:
+                self.__getitem__(None, paramlist).constrain_positive(warning=warning)
+        # currently_constrained = self.all_constrained_indices()
+        # to_make_positive = []
+        # for s in positive_strings:
+        #     for i in self.grep_param_names(".*" + s):
+        #         if not (i in currently_constrained):
+        #             to_make_positive.append(i)
+        # if len(to_make_positive):
+        #     self.constrain_positive(np.asarray(to_make_positive), warning=warning)
 
     def objective_function(self, x):
         """
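Note: the rewritten loop constrains each regex-matched block of parameters in one call instead of collecting raw indices first. Roughly, the selection step amounts to matching `.*<name>` against parameter names (illustrative names below, not GPy's API):

    import re

    names = ['rbf_variance', 'rbf_lengthscale', 'noise_variance', 'bias']
    positive_strings = ['variance', 'lengthscale', 'precision', 'kappa']
    for s in positive_strings:
        matched = [n for n in names if re.match('.*' + s, n)]
        if matched:
            print('constrain positive:', matched)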
@@ -277,7 +286,7 @@
         the likelihood and the priors.
 
         Failures are handled robustly. The algorithm will try several times to
-        return the objective, and will raise the original exception if it
+        return the objective, and will raise the original exception if
         the objective cannot be computed.
 
         :param x: the parameters of the model.
@@ -298,7 +307,7 @@
         Gets the gradients from the likelihood and the priors.
 
         Failures are handled robustly. The algorithm will try several times to
-        return the gradients, and will raise the original exception if
+        return the gradients, and will raise the original exception if
         the objective cannot be computed.
 
         :param x: the parameters of the model.
@@ -345,8 +354,8 @@
         :type max_f_eval: int
         :messages: whether to display during optimisation
         :type messages: bool
-        :param optimzer: which optimizer to use (defaults to self.preferred optimizer)
-        :type optimzer: string TODO: valid strings?
+        :param optimizer: which optimizer to use (defaults to self.preferred optimizer)
+        :type optimizer: string TODO: valid strings?
         """
         if optimizer is None:
             optimizer = self.preferred_optimizer
File 4 of 4 (the GPRegression class):

@@ -31,7 +31,7 @@ class GPRegression(GP):
 
         likelihood = likelihoods.Gaussian(Y, normalize=normalize_Y)
 
-        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
+        super(GPRegression, self).__init__(X, likelihood, kernel, normalize_X=normalize_X)
         self.ensure_default_constraints()
 
     def getstate(self):