From 58a05f37b768bd7e0ce4c861f9685a1f6c277e8e Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 15 May 2014 14:06:00 +0100 Subject: [PATCH] [latentfunctioninference] superclass LatentFunctionInference added, which contains a call just before and just after optimization --- GPy/core/gp.py | 20 ++++++++++- GPy/core/model.py | 2 +- .../latent_function_inference/__init__.py | 35 +++++++++++++++++-- .../latent_function_inference/dtc.py | 3 +- .../exact_gaussian_inference.py | 3 +- .../expectation_propagation.py | 3 +- .../latent_function_inference/fitc.py | 3 +- .../latent_function_inference/laplace.py | 3 +- .../latent_function_inference/var_dtc.py | 5 +-- .../latent_function_inference/var_dtc_gpu.py | 3 +- .../var_dtc_parallel.py | 3 +- 11 files changed, 69 insertions(+), 14 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 62e16de1..29d9032a 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -10,7 +10,7 @@ from model import Model from parameterization import ObsAr from .. import likelihoods from ..likelihoods.gaussian import Gaussian -from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation +from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation, LatentFunctionInference from parameterization.variational import VariationalPosterior class GP(Model): @@ -21,6 +21,7 @@ class GP(Model): :param Y: output observations :param kernel: a GPy kernel, defaults to rbf+white :param likelihood: a GPy likelihood + :param inference_method: the inference method to use for this GP, an instance of :class:`~GPy.inference.latent_function_inference.LatentFunctionInference` :rtype: model object .. 
Note:: Multiple independent outputs are allowed using columns of Y @@ -220,3 +221,20 @@ class GP(Model): """ return self.kern.input_sensitivity() + def optimize(self, optimizer=None, start=None, **kwargs): + """ + Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. + kwargs are passed to the optimizer. They can be: + + :param max_f_eval: maximum number of function evaluations + :type max_f_eval: int + :param messages: whether to display messages during optimisation + :type messages: bool + :param optimizer: which optimizer to use (defaults to self.preferred_optimizer) + :type optimizer: string + + TODO: valid args + """ + self.inference_method.on_optimization_start() + super(GP, self).optimize(optimizer, start, **kwargs) + self.inference_method.on_optimization_end() \ No newline at end of file diff --git a/GPy/core/model.py b/GPy/core/model.py index 38e8d4cf..71b21af6 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -220,7 +220,7 @@ class Model(Parameterized): if self.is_fixed: raise RuntimeError, "Cannot optimize, when everything is fixed" if self.size == 0: - raise RuntimeError, "Model without parameters cannot be minimized" + raise RuntimeError, "Model without parameters cannot be optimized" if optimizer is None: optimizer = self.preferred_optimizer diff --git a/GPy/inference/latent_function_inference/__init__.py b/GPy/inference/latent_function_inference/__init__.py index 68004a08..878c1e4f 100644 --- a/GPy/inference/latent_function_inference/__init__.py +++ b/GPy/inference/latent_function_inference/__init__.py @@ -25,6 +25,20 @@ etc. """ +class LatentFunctionInference(object): + def on_optimization_start(self): + """ + This function gets called just before the optimization loop starts. + """ + pass + + def on_optimization_end(self): + """ + This function gets called just after the optimization loop ended. 
+ """ + pass + + from exact_gaussian_inference import ExactGaussianInference from laplace import Laplace from GPy.inference.latent_function_inference.var_dtc import VarDTC @@ -38,11 +52,26 @@ from var_dtc_gpu import VarDTC_GPU # class FullLatentFunctionData(object): # # -# class LatentFunctionInference(object): -# def inference(self, kern, X, likelihood, Y, Y_metadata=None): + +# class EMLikeLatentFunctionInference(LatentFunctionInference): +# def update_approximation(self): +# """ +# This function gets called when the +# """ +# +# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None): # """ # Do inference on the latent functions given a covariance function `kern`, -# inputs and outputs `X` and `Y`, and a likelihood `likelihood`. +# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`. +# Additional metadata for the outputs `Y` can be given in `Y_metadata`. +# """ +# raise NotImplementedError, "Abstract base class for full inference" +# +# class VariationalLatentFunctionInference(LatentFunctionInference): +# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None): +# """ +# Do inference on the latent functions given a covariance function `kern`, +# inputs and outputs `X` and `Y`, inducing_inputs `Z`, and a likelihood `likelihood`. # Additional metadata for the outputs `Y` can be given in `Y_metadata`. # """ # raise NotImplementedError, "Abstract base class for full inference" diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py index 1a84da6b..1b6b1dbd 100644 --- a/GPy/inference/latent_function_inference/dtc.py +++ b/GPy/inference/latent_function_inference/dtc.py @@ -4,9 +4,10 @@ from posterior import Posterior from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv import numpy as np +from . 
import LatentFunctionInference log_2_pi = np.log(2*np.pi) -class DTC(object): +class DTC(LatentFunctionInference): """ An object for inference when the likelihood is Gaussian, but we want to do sparse inference. diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py index c0177e9f..0c02efe3 100644 --- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py +++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py @@ -5,10 +5,11 @@ from posterior import Posterior from ...util.linalg import pdinv, dpotrs, tdot from ...util import diag import numpy as np +from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) -class ExactGaussianInference(object): +class ExactGaussianInference(LatentFunctionInference): """ An object for inference when the likelihood is Gaussian. diff --git a/GPy/inference/latent_function_inference/expectation_propagation.py b/GPy/inference/latent_function_inference/expectation_propagation.py index 172f43fb..c2dea824 100644 --- a/GPy/inference/latent_function_inference/expectation_propagation.py +++ b/GPy/inference/latent_function_inference/expectation_propagation.py @@ -1,9 +1,10 @@ import numpy as np from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs from posterior import Posterior +from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) -class EP(object): +class EP(LatentFunctionInference): def __init__(self, epsilon=1e-6, eta=1., delta=1.): """ The expectation-propagation algorithm. diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py index de47e5d5..a184c6c4 100644 --- a/GPy/inference/latent_function_inference/fitc.py +++ b/GPy/inference/latent_function_inference/fitc.py @@ -5,9 +5,10 @@ from posterior import Posterior from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv from ...util import diag import numpy as np +from . 
import LatentFunctionInference log_2_pi = np.log(2*np.pi) -class FITC(object): +class FITC(LatentFunctionInference): """ An object for inference when the likelihood is Gaussian, but we want to do sparse inference. diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py index 9ba3f83f..1c153518 100644 --- a/GPy/inference/latent_function_inference/laplace.py +++ b/GPy/inference/latent_function_inference/laplace.py @@ -16,8 +16,9 @@ from ...util.misc import param_to_array from posterior import Posterior import warnings from scipy import optimize +from . import LatentFunctionInference -class Laplace(object): +class Laplace(LatentFunctionInference): def __init__(self): """ diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index 0cc841ed..3043a7e8 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -7,9 +7,10 @@ from ...util import diag from ...core.parameterization.variational import VariationalPosterior import numpy as np from ...util.misc import param_to_array +from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) -class VarDTC(object): +class VarDTC(LatentFunctionInference): """ An object for inference when the likelihood is Gaussian, but we want to do sparse inference. 
@@ -190,7 +191,7 @@ class VarDTC(object): post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm) return post, log_marginal, grad_dict -class VarDTCMissingData(object): +class VarDTCMissingData(LatentFunctionInference): const_jitter = 1e-6 def __init__(self, limit=1, inan=None): from ...util.caching import Cacher diff --git a/GPy/inference/latent_function_inference/var_dtc_gpu.py b/GPy/inference/latent_function_inference/var_dtc_gpu.py index 9b2da1c9..d346d01f 100644 --- a/GPy/inference/latent_function_inference/var_dtc_gpu.py +++ b/GPy/inference/latent_function_inference/var_dtc_gpu.py @@ -7,6 +7,7 @@ from ...util import diag from ...core.parameterization.variational import VariationalPosterior import numpy as np from ...util.misc import param_to_array +from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) from ...util import gpu_init @@ -19,7 +20,7 @@ try: except: pass -class VarDTC_GPU(object): +class VarDTC_GPU(LatentFunctionInference): """ An object for inference when the likelihood is Gaussian, but we want to do sparse inference. diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index 87236e2a..ffda0aba 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -7,9 +7,10 @@ from ...util import diag from ...core.parameterization.variational import VariationalPosterior import numpy as np from ...util.misc import param_to_array +from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) -class VarDTC_minibatch(object): +class VarDTC_minibatch(LatentFunctionInference): """ An object for inference when the likelihood is Gaussian, but we want to do sparse inference.