Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-09 12:02:38 +02:00
Merge remote-tracking branch 'ShefML/devel' into warped_gps_fixes
Commit 273009f9ad
91 changed files with 1147 additions and 3781 deletions
@@ -3,8 +3,8 @@
 
 from .gp_regression import GPRegression
 from .gp_classification import GPClassification
-from .sparse_gp_regression import SparseGPRegression
-from .sparse_gp_classification import SparseGPClassification
+from .sparse_gp_regression import SparseGPRegression, SparseGPRegressionUncertainInput
+from .sparse_gp_classification import SparseGPClassification, SparseGPClassificationUncertainInput
 from .gplvm import GPLVM
 from .bcgplvm import BCGPLVM
 from .sparse_gplvm import SparseGPLVM
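With this hunk both uncertain-input wrappers are re-exported from GPy.models. A minimal smoke test, assuming the devel API at this commit (the data, input variance, and inducing-point count below are illustrative only):

import numpy as np
import GPy

np.random.seed(0)
X = np.random.rand(50, 1)
Y = np.sin(6 * X) + 0.05 * np.random.randn(50, 1)
X_variance = np.full_like(X, 1e-2)  # assumed Gaussian input noise

# Both classes are now reachable at package level:
m = GPy.models.SparseGPRegressionUncertainInput(X, X_variance, Y, num_inducing=10)
m.optimize()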
@@ -97,7 +97,7 @@ class BayesianGPLVM(SparseGP_MPI):
                                             dL_dpsi2=self.grad_dict['dL_dpsi2'])
 
         self.variational_prior.update_gradients_KL(self.X)
 
         self._Xgrad = self.X.gradient.copy()
 
         #super(BayesianGPLVM, self).parameters_changed()
         #self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
@@ -81,11 +81,6 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         """Get the gradients of the posterior distribution of X in its specific form."""
         return X.mean.gradient, X.variance.gradient
 
-    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None, **kw):
-        posterior, log_marginal_likelihood, grad_dict = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm,
-                                                                                                                      psi0=psi0, psi1=psi1, psi2=psi2, **kw)
-        return posterior, log_marginal_likelihood, grad_dict
-
     def _outer_values_update(self, full_values):
         """
         Here you put the values, which were collected before in the right places.
@@ -361,7 +361,7 @@ class SkewChecker(HessianChecker):
 
         #Check every block individually (for ease)
         check_passed = [False]*numeric_hess.shape[2]
-        for block_ind in xrange(numeric_hess.shape[2]):
+        for block_ind in range(numeric_hess.shape[2]):
             #Unless super_plot is set, just plot the first one
             p = True if (plot and block_ind == numeric_hess.shape[2]-1) or super_plot else False
             if verbose:
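xrange does not exist on Python 3, so the loop now uses range; on Python 2 the only difference is that range materialises a list, which is harmless at this size. For code that must still run on both interpreters, a common shim (a generic pattern, not something this commit adds) is:

# Python 2/3 compatibility shim (generic pattern, not GPy code):
try:
    xrange          # present on Python 2
except NameError:   # Python 3: alias the builtin
    xrange = range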
@@ -15,6 +15,7 @@ from ..util.initialization import initialize_latent
 from ..core.sparse_gp import SparseGP, GP
 from GPy.core.parameterization.variational import VariationalPosterior
+from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
 from GPy.models.bayesian_gplvm import BayesianGPLVM
 from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
 
 class MRD(BayesianGPLVMMiniBatch):
@@ -170,7 +171,8 @@ class MRD(BayesianGPLVMMiniBatch):
             self._log_marginal_likelihood += b._log_marginal_likelihood
 
-            self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
+            self.logger.info('working on im <{}>'.format(hex(id(i))))
+            self.Z.gradient[:] += b._Zgrad # b.Z.gradient # full_values['Zgrad']
 
             #grad_dict = b.full_values
 
         if self.has_uncertain_inputs():
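Each view b now contributes its cached copy b._Zgrad (stored by SparseGPMiniBatch.parameters_changed in a later hunk of this commit) instead of the live b.Z.gradient, which may already have been overwritten by the outer update. A hypothetical NumPy sketch of the accumulation over a shared inducing-input matrix:

import numpy as np

# Stand-ins for three MRD views sharing one Z (10 inducing points, 2 dims);
# each g plays the role of a cached b._Zgrad.
Z_gradient = np.zeros((10, 2))
per_view_grads = [np.random.rand(10, 2) for _ in range(3)]
for g in per_view_grads:
    Z_gradient[:] += g  # summed across views, as in the loop above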
@@ -6,12 +6,11 @@ import numpy as np
 from ..core import SparseGP
 from .. import likelihoods
 from .. import kern
-from ..likelihoods import likelihood
-from ..inference.latent_function_inference import expectation_propagation_dtc
+from ..inference.latent_function_inference import EPDTC
 
 class SparseGPClassification(SparseGP):
     """
-    sparse Gaussian Process model for classification
+    Sparse Gaussian Process model for classification
 
     This is a thin wrapper around the sparse_GP class, with a set of sensible defaults
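EPDTC is re-exported by the latent_function_inference package, so the shorter import path replaces the module-level one. Code that needs the inference object explicitly can now write, for instance:

# EPDTC (expectation propagation on the DTC sparse approximation)
# is importable straight from the package, per the hunk above:
from GPy.inference.latent_function_inference import EPDTC

inference = EPDTC()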
@@ -27,10 +26,7 @@ class SparseGPClassification(SparseGP):
 
     """
 
-    #def __init__(self, X, Y=None, likelihood=None, kernel=None, normalize_X=False, normalize_Y=False, Z=None, num_inducing=10):
     def __init__(self, X, Y=None, likelihood=None, kernel=None, Z=None, num_inducing=10, Y_metadata=None):
-
-
         if kernel is None:
             kernel = kern.RBF(X.shape[1])
@@ -42,5 +38,57 @@ class SparseGPClassification(SparseGP):
         else:
             assert Z.shape[1] == X.shape[1]
 
-        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method=expectation_propagation_dtc.EPDTC(), name='SparseGPClassification',Y_metadata=Y_metadata)
-        #def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None, name='sparse gp', Y_metadata=None):
+        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method=EPDTC(), name='SparseGPClassification',Y_metadata=Y_metadata)
+
+class SparseGPClassificationUncertainInput(SparseGP):
+    """
+    Sparse Gaussian Process model for classification with uncertain inputs.
+
+    This is a thin wrapper around the sparse_GP class, with a set of sensible defaults
+
+    :param X: input observations
+    :type X: np.ndarray (num_data x input_dim)
+    :param X_variance: The uncertainty in the measurements of X (Gaussian variance, optional)
+    :type X_variance: np.ndarray (num_data x input_dim)
+    :param Y: observed values
+    :param kernel: a GPy kernel, defaults to rbf+white
+    :param Z: inducing inputs (optional, see note)
+    :type Z: np.ndarray (num_inducing x input_dim) | None
+    :param num_inducing: number of inducing points (ignored if Z is passed, see note)
+    :type num_inducing: int
+    :rtype: model object
+
+    .. Note:: If no Z array is passed, num_inducing (default 10) points are selected from the data. Otherwise num_inducing is ignored.
+    .. Note:: Multiple independent outputs are allowed using columns of Y
+    """
+    def __init__(self, X, X_variance, Y, kernel=None, Z=None, num_inducing=10, Y_metadata=None, normalizer=None):
+        from ..core.parameterization.variational import NormalPosterior
+        if kernel is None:
+            kernel = kern.RBF(X.shape[1])
+
+        likelihood = likelihoods.Bernoulli()
+
+        if Z is None:
+            i = np.random.permutation(X.shape[0])[:num_inducing]
+            Z = X[i].copy()
+        else:
+            assert Z.shape[1] == X.shape[1]
+
+        X = NormalPosterior(X, X_variance)
+
+        SparseGP.__init__(self, X, Y, Z, kernel, likelihood,
+                          inference_method=EPDTC(),
+                          name='SparseGPClassification', Y_metadata=Y_metadata, normalizer=normalizer)
+
+    def parameters_changed(self):
+        #Compute the psi statistics for N once, but don't sum out N in psi2
+        self.psi0 = self.kern.psi0(self.Z, self.X)
+        self.psi1 = self.kern.psi1(self.Z, self.X)
+        self.psi2 = self.kern.psi2n(self.Z, self.X)
+        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata, psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
+        self._update_gradients()
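A minimal usage sketch of the new class, assuming the constructor signature shown above (the labels, input variance, and seed are illustrative only):

import numpy as np
import GPy

np.random.seed(1)
X = np.random.rand(60, 1)
Y = (X > 0.5).astype(float)         # binary labels in {0, 1}
X_variance = np.full_like(X, 1e-2)  # assumed measurement variance on X

m = GPy.models.SparseGPClassificationUncertainInput(X, X_variance, Y, num_inducing=10)
m.optimize()
probs, _ = m.predict(np.linspace(0, 1, 5)[:, None])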
@@ -99,13 +99,8 @@ class SparseGPMiniBatch(SparseGP):
         like them into this dictionary for inner use of the indices inside the
         algorithm.
         """
-        if psi2 is None:
-            psi2_sum_n = None
-        else:
-            psi2_sum_n = psi2.sum(axis=0)
-        posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm,
-                                                                                        dL_dKmm=dL_dKmm, psi0=psi0, psi1=psi1, psi2=psi2_sum_n, **kwargs)
-        return posterior, log_marginal_likelihood, grad_dict
+        return self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm,
+                                               dL_dKmm=dL_dKmm, psi0=psi0, psi1=psi1, psi2=psi2, **kwargs)
 
     def _inner_take_over_or_update(self, full_values=None, current_values=None, value_indices=None):
         """
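The inner call no longer sums psi2 over the data axis itself; a caller that needs the per-datum blocks (such as the kern.psi2n path added in the classification hunk above) can now pass them through untouched. The shape relationship in plain NumPy:

import numpy as np

N, M = 100, 10                    # data points, inducing points
psi2_n = np.random.rand(N, M, M)  # per-datum blocks, as from kern.psi2n
psi2 = psi2_n.sum(axis=0)         # summed form previously built here
assert psi2.shape == (M, M)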
@@ -326,3 +321,4 @@ class SparseGPMiniBatch(SparseGP):
         else:
             self.posterior, self._log_marginal_likelihood, self.grad_dict = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
             self._outer_values_update(self.grad_dict)
+        self._Zgrad = self.Z.gradient.copy()
@@ -9,7 +9,6 @@ from .. import likelihoods
 from .. import kern
 from ..inference.latent_function_inference import VarDTC
 from ..core.parameterization.variational import NormalPosterior
-from GPy.inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
 
 class SparseGPRegression(SparseGP_MPI):
     """
@@ -18,6 +17,7 @@ class SparseGPRegression(SparseGP_MPI):
     This is a thin wrapper around the SparseGP class, with a set of sensible defaults
 
     :param X: input observations
+    :param X_variance: input uncertainties, one per input X
     :param Y: observed values
     :param kernel: a GPy kernel, defaults to rbf+white
     :param Z: inducing inputs (optional, see note)
@@ -49,7 +49,7 @@ class SparseGPRegression(SparseGP_MPI):
 
         if not (X_variance is None):
             X = NormalPosterior(X,X_variance)
 
-
         if mpi_comm is not None:
+            from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
             infr = VarDTC_minibatch(mpi_comm=mpi_comm)
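When X_variance is supplied, the inputs are wrapped in a NormalPosterior and the model runs with uncertain inputs. A sketch, assuming X_variance is accepted as a keyword argument of SparseGPRegression.__init__ as this hunk suggests:

import numpy as np
import GPy

X = np.linspace(0, 1, 40)[:, None]
Y = np.sin(6 * X) + 0.05 * np.random.randn(40, 1)
X_variance = np.full_like(X, 1e-2)  # assumed per-input Gaussian variance

m = GPy.models.SparseGPRegression(X, Y, num_inducing=8, X_variance=X_variance)
m.optimize()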
@@ -63,47 +63,4 @@ class SparseGPRegression(SparseGP_MPI):
         if isinstance(self.inference_method,VarDTC_minibatch):
             update_gradients_sparsegp(self, mpi_comm=self.mpi_comm)
         else:
             super(SparseGPRegression, self).parameters_changed()
-
-class SparseGPRegressionUncertainInput(SparseGP):
-    """
-    Gaussian Process model for regression with Gaussian variance on the inputs (X_variance)
-
-    This is a thin wrapper around the SparseGP class, with a set of sensible defaults
-
-    """
-
-    def __init__(self, X, X_variance, Y, kernel=None, Z=None, num_inducing=10, normalizer=None):
-        """
-        :param X: input observations
-        :type X: np.ndarray (num_data x input_dim)
-        :param X_variance: The uncertainty in the measurements of X (Gaussian variance, optional)
-        :type X_variance: np.ndarray (num_data x input_dim)
-        :param Y: observed values
-        :param kernel: a GPy kernel, defaults to rbf+white
-        :param Z: inducing inputs (optional, see note)
-        :type Z: np.ndarray (num_inducing x input_dim) | None
-        :param num_inducing: number of inducing points (ignored if Z is passed, see note)
-        :type num_inducing: int
-        :rtype: model object
-
-        .. Note:: If no Z array is passed, num_inducing (default 10) points are selected from the data. Otherwise num_inducing is ignored.
-        .. Note:: Multiple independent outputs are allowed using columns of Y
-        """
-        num_data, input_dim = X.shape
-
-        # kern defaults to rbf (plus white for stability)
-        if kernel is None:
-            kernel = kern.RBF(input_dim) + kern.White(input_dim, variance=1e-3)
-
-        # Z defaults to a subset of the data
-        if Z is None:
-            i = np.random.permutation(num_data)[:min(num_inducing, num_data)]
-            Z = X[i].copy()
-        else:
-            assert Z.shape[1] == input_dim
-
-        likelihood = likelihoods.Gaussian()
-
-        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, X_variance=X_variance, inference_method=VarDTC(), normalizer=normalizer)
-        self.ensure_default_constraints()
-        super(SparseGPRegression, self).parameters_changed()