mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-10 12:32:40 +02:00
[latentfunctioninference] added superclass LatentFunctionInference, which provides hooks called just before and just after optimization
This commit is contained in:
parent 02b5ee1e46
commit 58a05f37b7
11 changed files with 69 additions and 14 deletions
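In short, the commit wires a template-method hook into the optimization path: GP.optimize() now notifies its inference method immediately before and after the optimization loop, and the new LatentFunctionInference superclass supplies no-op defaults so existing inference objects are unaffected. A minimal standalone sketch of the pattern (the names mirror the diff below; this is an illustration, not the actual GPy classes):

    class LatentFunctionInference(object):
        # Default hooks are no-ops; subclasses override what they need.
        def on_optimization_start(self):
            pass

        def on_optimization_end(self):
            pass

    class Model(object):
        # Stand-in for GPy's GP: brackets the optimizer with the hooks.
        def __init__(self, inference_method):
            self.inference_method = inference_method

        def optimize(self):
            self.inference_method.on_optimization_start()
            # ... the actual optimization loop would run here ...
            self.inference_method.on_optimization_end()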
@@ -10,7 +10,7 @@ from model import Model
 from parameterization import ObsAr
 from .. import likelihoods
 from ..likelihoods.gaussian import Gaussian
-from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation
+from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation, LatentFunctionInference
 from parameterization.variational import VariationalPosterior

 class GP(Model):
@@ -21,6 +21,7 @@ class GP(Model):
     :param Y: output observations
     :param kernel: a GPy kernel, defaults to rbf+white
     :param likelihood: a GPy likelihood
+    :param :class:`~GPy.inference.latent_function_inference.LatentFunctionInference` inference_method: The inference method to use for this GP
     :rtype: model object

     .. Note:: Multiple independent outputs are allowed using columns of Y
@@ -220,3 +221,20 @@ class GP(Model):
         """
         return self.kern.input_sensitivity()

+    def optimize(self, optimizer=None, start=None, **kwargs):
+        """
+        Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
+        kwargs are passed to the optimizer. They can be:
+
+        :param max_f_eval: maximum number of function evaluations
+        :type max_f_eval: int
+        :param messages: whether to display messages during optimisation
+        :type messages: bool
+        :param optimizer: which optimizer to use (defaults to self.preferred_optimizer)
+        :type optimizer: string
+
+        TODO: valid args
+        """
+        self.inference_method.on_optimization_start()
+        super(GP, self).optimize(optimizer, start, **kwargs)
+        self.inference_method.on_optimization_end()
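A hedged usage sketch of the new code path (this assumes the standard GPy.models.GPRegression constructor; the toy data is made up, and the kwargs are the ones documented above):

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)

    m = GPy.models.GPRegression(X, Y)
    # on_optimization_start() fires, then the optimizer runs,
    # then on_optimization_end() fires:
    m.optimize(messages=True, max_f_eval=1000)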
@@ -220,7 +220,7 @@ class Model(Parameterized):
         if self.is_fixed:
             raise RuntimeError, "Cannot optimize, when everything is fixed"
         if self.size == 0:
-            raise RuntimeError, "Model without parameters cannot be minimized"
+            raise RuntimeError, "Model without parameters cannot be optimized"

         if optimizer is None:
             optimizer = self.preferred_optimizer
@@ -25,6 +25,20 @@ etc.
 """

+class LatentFunctionInference(object):
+    def on_optimization_start(self):
+        """
+        This function gets called just before the optimization loop starts.
+        """
+        pass
+
+    def on_optimization_end(self):
+        """
+        This function gets called just after the optimization loop ends.
+        """
+        pass
+
+
 from exact_gaussian_inference import ExactGaussianInference
 from laplace import Laplace
 from GPy.inference.latent_function_inference.var_dtc import VarDTC
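One way a user-defined inference method could use these hooks (hedged: only the base class and its import path come from this commit; LoggingInference is hypothetical):

    from GPy.inference.latent_function_inference import LatentFunctionInference

    class LoggingInference(LatentFunctionInference):
        # Hypothetical subclass: reports when GP.optimize() enters
        # and leaves the optimization loop.
        def on_optimization_start(self):
            print("entering optimization loop")

        def on_optimization_end(self):
            print("optimization loop finished")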
@@ -38,11 +52,26 @@ from var_dtc_gpu import VarDTC_GPU
 # class FullLatentFunctionData(object):
 #
 #
 # class LatentFunctionInference(object):
-#     def inference(self, kern, X, likelihood, Y, Y_metadata=None):
+# class EMLikeLatentFunctionInference(LatentFunctionInference):
+#     def update_approximation(self):
+#         """
+#         This function gets called when the
+#         """
+#
+#     def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
 #         """
 #         Do inference on the latent functions given a covariance function `kern`,
-#         inputs and outputs `X` and `Y`, and a likelihood `likelihood`.
+#         inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
 #         Additional metadata for the outputs `Y` can be given in `Y_metadata`.
 #         """
 #         raise NotImplementedError, "Abstract base class for full inference"
+#
+# class VariationalLatentFunctionInference(LatentFunctionInference):
+#     def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
+#         """
+#         Do inference on the latent functions given a covariance function `kern`,
+#         inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`.
+#         Additional metadata for the outputs `Y` can be given in `Y_metadata`.
+#         """
+#         raise NotImplementedError, "Abstract base class for full inference"
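The commented block above sketches, without yet enforcing, a shared inference() contract. Read together with the VarDTC hunk further down, which ends with "return post, log_marginal, grad_dict", an implementation would take roughly this shape (a hedged stub; MyInference and its placeholder internals are hypothetical):

    class MyInference(LatentFunctionInference):
        def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
            # A real implementation would compute an approximate posterior
            # over the latent function, the log marginal likelihood, and
            # the gradients needed by the model.
            post = None          # would be a Posterior instance
            log_marginal = 0.0   # would be the (approximate) log marginal
            grad_dict = {}       # gradients w.r.t. kernel/likelihood params
            return post, log_marginal, grad_dict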
@@ -4,9 +4,10 @@
 from posterior import Posterior
 from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
 import numpy as np
+from . import LatentFunctionInference
 log_2_pi = np.log(2*np.pi)

-class DTC(object):
+class DTC(LatentFunctionInference):
     """
     An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
@@ -5,10 +5,11 @@ from posterior import Posterior
 from ...util.linalg import pdinv, dpotrs, tdot
 from ...util import diag
 import numpy as np
+from . import LatentFunctionInference
 log_2_pi = np.log(2*np.pi)


-class ExactGaussianInference(object):
+class ExactGaussianInference(LatentFunctionInference):
     """
     An object for inference when the likelihood is Gaussian.
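The same one-line reparenting recurs in each remaining hunk (EP, FITC, Laplace, and the VarDTC variants). A quick hedged sanity check that the refactor is behaviour-preserving, since the inherited hooks default to no-ops (illustrative session, not part of the commit):

    from GPy.inference.latent_function_inference import (
        LatentFunctionInference, ExactGaussianInference)

    inf = ExactGaussianInference()
    assert isinstance(inf, LatentFunctionInference)
    inf.on_optimization_start()  # inherited no-op
    inf.on_optimization_end()    # inherited no-op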
@@ -1,9 +1,10 @@
 import numpy as np
 from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs
 from posterior import Posterior
+from . import LatentFunctionInference
 log_2_pi = np.log(2*np.pi)

-class EP(object):
+class EP(LatentFunctionInference):
     def __init__(self, epsilon=1e-6, eta=1., delta=1.):
         """
         The expectation-propagation algorithm.
@@ -5,9 +5,10 @@ from posterior import Posterior
 from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
 from ...util import diag
 import numpy as np
+from . import LatentFunctionInference
 log_2_pi = np.log(2*np.pi)

-class FITC(object):
+class FITC(LatentFunctionInference):
     """
     An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
@@ -16,8 +16,9 @@ from ...util.misc import param_to_array
 from posterior import Posterior
 import warnings
 from scipy import optimize
+from . import LatentFunctionInference

-class Laplace(object):
+class Laplace(LatentFunctionInference):

     def __init__(self):
         """
@@ -7,9 +7,10 @@ from ...util import diag
 from ...core.parameterization.variational import VariationalPosterior
 import numpy as np
 from ...util.misc import param_to_array
+from . import LatentFunctionInference
 log_2_pi = np.log(2*np.pi)

-class VarDTC(object):
+class VarDTC(LatentFunctionInference):
     """
     An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
@@ -190,7 +191,7 @@ class VarDTC(object):
         post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
         return post, log_marginal, grad_dict

-class VarDTCMissingData(object):
+class VarDTCMissingData(LatentFunctionInference):
     const_jitter = 1e-6
     def __init__(self, limit=1, inan=None):
         from ...util.caching import Cacher
@@ -7,6 +7,7 @@ from ...util import diag
 from ...core.parameterization.variational import VariationalPosterior
 import numpy as np
 from ...util.misc import param_to_array
+from . import LatentFunctionInference
 log_2_pi = np.log(2*np.pi)

 from ...util import gpu_init
@@ -19,7 +20,7 @@ try:
 except:
     pass

-class VarDTC_GPU(object):
+class VarDTC_GPU(LatentFunctionInference):
     """
     An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
@@ -7,9 +7,10 @@ from ...util import diag
 from ...core.parameterization.variational import VariationalPosterior
 import numpy as np
 from ...util.misc import param_to_array
+from . import LatentFunctionInference
 log_2_pi = np.log(2*np.pi)

-class VarDTC_minibatch(object):
+class VarDTC_minibatch(LatentFunctionInference):
     """
     An object for inference when the likelihood is Gaussian, but we want to do sparse inference.