mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-10 04:22:38 +02:00
[classification] sparse gp classification and dtc update
This commit is contained in:
parent
4ea5ebaa68
commit
1d354f5cce
14 changed files with 208 additions and 369 deletions
|
|
@ -3,8 +3,8 @@
|
|||
|
||||
from .gp_regression import GPRegression
|
||||
from .gp_classification import GPClassification
|
||||
from .sparse_gp_regression import SparseGPRegression, SparseGPRegressionUncertainInput
|
||||
from .sparse_gp_classification import SparseGPClassification
|
||||
from .sparse_gp_regression import SparseGPRegression
|
||||
from .sparse_gp_classification import SparseGPClassification, SparseGPClassificationUncertainInput
|
||||
from .gplvm import GPLVM
|
||||
from .bcgplvm import BCGPLVM
|
||||
from .sparse_gplvm import SparseGPLVM
|
||||
|
|
|
|||
|
|
@ -81,11 +81,6 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
|
|||
"""Get the gradients of the posterior distribution of X in its specific form."""
|
||||
return X.mean.gradient, X.variance.gradient
|
||||
|
||||
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None, **kw):
|
||||
posterior, log_marginal_likelihood, grad_dict = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm,
|
||||
psi0=psi0, psi1=psi1, psi2=psi2, **kw)
|
||||
return posterior, log_marginal_likelihood, grad_dict
|
||||
|
||||
def _outer_values_update(self, full_values):
|
||||
"""
|
||||
Here you put the values, which were collected before in the right places.
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ from ..inference.latent_function_inference import expectation_propagation_dtc
|
|||
|
||||
class SparseGPClassification(SparseGP):
|
||||
"""
|
||||
sparse Gaussian Process model for classification
|
||||
Sparse Gaussian Process model for classification
|
||||
|
||||
This is a thin wrapper around the sparse_GP class, with a set of sensible defaults
|
||||
|
||||
|
|
@ -27,10 +27,7 @@ class SparseGPClassification(SparseGP):
|
|||
|
||||
"""
|
||||
|
||||
#def __init__(self, X, Y=None, likelihood=None, kernel=None, normalize_X=False, normalize_Y=False, Z=None, num_inducing=10):
|
||||
def __init__(self, X, Y=None, likelihood=None, kernel=None, Z=None, num_inducing=10, Y_metadata=None):
|
||||
|
||||
|
||||
if kernel is None:
|
||||
kernel = kern.RBF(X.shape[1])
|
||||
|
||||
|
|
@ -43,4 +40,56 @@ class SparseGPClassification(SparseGP):
|
|||
assert Z.shape[1] == X.shape[1]
|
||||
|
||||
SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method=expectation_propagation_dtc.EPDTC(), name='SparseGPClassification',Y_metadata=Y_metadata)
|
||||
#def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None, name='sparse gp', Y_metadata=None):
|
||||
|
||||
class SparseGPClassificationUncertainInput(SparseGP):
    """
    Sparse Gaussian Process model for classification with uncertain inputs.

    This is a thin wrapper around the sparse_GP class, with a set of sensible defaults

    :param X: input observations
    :type X: np.ndarray (num_data x input_dim)
    :param X_variance: The uncertainty in the measurements of X (Gaussian variance, optional)
    :type X_variance: np.ndarray (num_data x input_dim)
    :param Y: observed values
    :param kernel: a GPy kernel, defaults to rbf+white
    :param Z: inducing inputs (optional, see note)
    :type Z: np.ndarray (num_inducing x input_dim) | None
    :param num_inducing: number of inducing points (ignored if Z is passed, see note)
    :type num_inducing: int
    :rtype: model object

    .. Note:: If no Z array is passed, num_inducing (default 10) points are selected from the data. Otherwise num_inducing is ignored
    .. Note:: Multiple independent outputs are allowed using columns of Y
    """
    def __init__(self, X, X_variance, Y, kernel=None, Z=None, num_inducing=10, Y_metadata=None, normalizer=None):
        from ..core.parameterization.variational import NormalPosterior

        # Default kernel: an RBF over the full input dimensionality.
        kernel = kern.RBF(X.shape[1]) if kernel is None else kernel

        likelihood = likelihoods.Bernoulli()

        if Z is None:
            # Choose num_inducing inducing points at random from the data.
            chosen = np.random.permutation(X.shape[0])[:num_inducing]
            Z = X[chosen].copy()
        else:
            assert Z.shape[1] == X.shape[1]

        # Bundle the inputs with their variances as a variational posterior.
        X = NormalPosterior(X, X_variance)

        SparseGP.__init__(self, X, Y, Z, kernel, likelihood,
                          inference_method=expectation_propagation_dtc.EPDTC(),
                          name='SparseGPClassification', Y_metadata=Y_metadata, normalizer=normalizer)

    def parameters_changed(self):
        """Recompute psi statistics and rerun inference after a parameter update."""
        # Compute the psi statistics for N once, but don't sum out N in psi2
        # (psi2n keeps the per-datum axis).
        self.psi0 = self.kern.psi0(self.Z, self.X)
        self.psi1 = self.kern.psi1(self.Z, self.X)
        self.psi2 = self.kern.psi2n(self.Z, self.X)
        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(
            self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata,
            psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
        self._update_gradients()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -99,13 +99,8 @@ class SparseGPMiniBatch(SparseGP):
|
|||
like them into this dictionary for inner use of the indices inside the
|
||||
algorithm.
|
||||
"""
|
||||
if psi2 is None:
|
||||
psi2_sum_n = None
|
||||
else:
|
||||
psi2_sum_n = psi2.sum(axis=0)
|
||||
posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm,
|
||||
dL_dKmm=dL_dKmm, psi0=psi0, psi1=psi1, psi2=psi2_sum_n, **kwargs)
|
||||
return posterior, log_marginal_likelihood, grad_dict
|
||||
return self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm,
|
||||
dL_dKmm=dL_dKmm, psi0=psi0, psi1=psi1, psi2=psi2, **kwargs)
|
||||
|
||||
def _inner_take_over_or_update(self, full_values=None, current_values=None, value_indices=None):
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ from .. import likelihoods
|
|||
from .. import kern
|
||||
from ..inference.latent_function_inference import VarDTC
|
||||
from ..core.parameterization.variational import NormalPosterior
|
||||
from GPy.inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
|
||||
|
||||
class SparseGPRegression(SparseGP_MPI):
|
||||
"""
|
||||
|
|
@ -18,6 +17,7 @@ class SparseGPRegression(SparseGP_MPI):
|
|||
This is a thin wrapper around the SparseGP class, with a set of sensible defaults
|
||||
|
||||
:param X: input observations
|
||||
:param X_variance: input uncertainties, one per input X
|
||||
:param Y: observed values
|
||||
:param kernel: a GPy kernel, defaults to rbf+white
|
||||
:param Z: inducing inputs (optional, see note)
|
||||
|
|
@ -49,7 +49,7 @@ class SparseGPRegression(SparseGP_MPI):
|
|||
|
||||
if not (X_variance is None):
|
||||
X = NormalPosterior(X,X_variance)
|
||||
|
||||
|
||||
if mpi_comm is not None:
|
||||
from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
|
||||
infr = VarDTC_minibatch(mpi_comm=mpi_comm)
|
||||
|
|
@ -63,47 +63,4 @@ class SparseGPRegression(SparseGP_MPI):
|
|||
if isinstance(self.inference_method,VarDTC_minibatch):
|
||||
update_gradients_sparsegp(self, mpi_comm=self.mpi_comm)
|
||||
else:
|
||||
super(SparseGPRegression, self).parameters_changed()
|
||||
|
||||
class SparseGPRegressionUncertainInput(SparseGP):
    """
    Gaussian Process model for regression with Gaussian variance on the inputs (X_variance)

    This is a thin wrapper around the SparseGP class, with a set of sensible defaults
    """

    def __init__(self, X, X_variance, Y, kernel=None, Z=None, num_inducing=10, normalizer=None):
        """
        :param X: input observations
        :type X: np.ndarray (num_data x input_dim)
        :param X_variance: The uncertainty in the measurements of X (Gaussian variance, optional)
        :type X_variance: np.ndarray (num_data x input_dim)
        :param Y: observed values
        :param kernel: a GPy kernel, defaults to rbf+white
        :param Z: inducing inputs (optional, see note)
        :type Z: np.ndarray (num_inducing x input_dim) | None
        :param num_inducing: number of inducing points (ignored if Z is passed, see note)
        :type num_inducing: int
        :rtype: model object

        .. Note:: If no Z array is passed, num_inducing (default 10) points are selected from the data. Otherwise num_inducing is ignored
        .. Note:: Multiple independent outputs are allowed using columns of Y
        """
        num_data, input_dim = X.shape

        # kern defaults to rbf (plus white for stability)
        if kernel is None:
            kernel = kern.RBF(input_dim) + kern.White(input_dim, variance=1e-3)

        # Z defaults to a random subset of the data (never more than num_data points)
        if Z is None:
            i = np.random.permutation(num_data)[:min(num_inducing, num_data)]
            Z = X[i].copy()
        else:
            assert Z.shape[1] == input_dim

        likelihood = likelihoods.Gaussian()

        # NOTE(review): the classification counterpart wraps the inputs as
        # NormalPosterior(X, X_variance) instead of passing X_variance as a
        # keyword here — confirm SparseGP.__init__ actually accepts X_variance.
        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, X_variance=X_variance, inference_method=VarDTC(), normalizer=normalizer)
        # NOTE(review): ensure_default_constraints looks like a legacy API —
        # confirm it still exists on the current model base class.
        self.ensure_default_constraints()
        # BUGFIX: the original called super(SparseGPRegression, self), but this
        # class derives from SparseGP, not SparseGPRegression, so that super()
        # call raises TypeError at runtime. Name this class instead.
        super(SparseGPRegressionUncertainInput, self).parameters_changed()
|
||||
Loading…
Add table
Add a link
Reference in a new issue