Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-10 12:32:40 +02:00

Allowing EP in BGPLVM and MRD

parent afcb30dfbe, commit c3ab4b7979
3 changed files with 46 additions and 38 deletions
Bayesian_GPLVM (changed file 1 of 3):

@@ -26,11 +26,11 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
     :type init: 'PCA'|'random'
     """

-    def __init__(self, Y, Q, X=None, X_variance=None, init='PCA', M=10,
+    def __init__(self, likelihood, Q, X=None, X_variance=None, init='PCA', M=10,
                  Z=None, kernel=None, oldpsave=10, _debug=False,
                  **kwargs):
         if X == None:
-            X = self.initialise_latent(init, Q, Y)
+            X = self.initialise_latent(init, Q, likelihood.Y)

         if X_variance is None:
             X_variance = np.clip((np.ones_like(X) * 0.5) + .01 * np.random.randn(*X.shape), 0.001, 1)
@@ -56,7 +56,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         self._savedpsiKmm = []
         self._savedABCD = []

-        sparse_GP.__init__(self, X, Gaussian(Y), kernel, Z=Z, X_variance=X_variance, **kwargs)
+        sparse_GP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs)

     @property
     def oldps(self):
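With this change the constructor no longer builds a Gaussian likelihood internally from Y; the caller hands in the likelihood object, which is what opens the door to EP. A minimal sketch of the new calling convention (the GPy.models / GPy.likelihoods paths and keyword usage are assumptions inferred from this diff, not verified against a release):

    import numpy as np
    import GPy

    Y = np.random.randn(100, 12)         # observed data, N x D
    lik = GPy.likelihoods.Gaussian(Y)    # the data now lives inside the likelihood
    # before this commit: GPy.models.Bayesian_GPLVM(Y, Q=5, M=10)
    m = GPy.models.Bayesian_GPLVM(lik, Q=5, M=10)  # likelihood passed through to sparse_GP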
MRD (changed file 2 of 3):

@@ -15,11 +15,11 @@ import pylab
 class MRD(model):
     """
     Do MRD on given Datasets in Ylist.
-    All Ys in Ylist are in [N x Dn], where Dn can be different per Yn,
+    All Ys in likelihood_list are in [N x Dn], where Dn can be different per Yn,
     N must be shared across datasets though.

-    :param Ylist...: observed datasets
-    :type Ylist: [np.ndarray]
+    :param likelihood_list...: likelihoods of observed datasets
+    :type likelihood_list: [GPy.likelihood]
     :param names: names for different gplvm models
     :type names: [str]
     :param Q: latent dimensionality (will raise
@@ -41,8 +41,9 @@ class MRD(model):
     :param kernel:
         kernel to use
     """

-    def __init__(self, *Ylist, **kwargs):
+    #TODO allow different kernels for different outputs
+    #def __init__(self, *Ylist, **kwargs):
+    def __init__(self, *likelihood_list, **kwargs):
         if kwargs.has_key("_debug"):
             self._debug = kwargs['_debug']
             del kwargs['_debug']
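MRD now collects likelihood objects through *likelihood_list instead of raw arrays. A sketch of the changed call, under the same assumptions about module paths; names and Q follow the docstring above:

    import numpy as np
    import GPy

    N = 100                                  # N must be shared across datasets
    lik1 = GPy.likelihoods.Gaussian(np.random.randn(N, 8))
    lik2 = GPy.likelihoods.Gaussian(np.random.randn(N, 3))
    # before this commit: GPy.models.MRD(Y1, Y2, names=['a', 'b'], Q=6)
    m = GPy.models.MRD(lik1, lik2, names=['a', 'b'], Q=6)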
@@ -52,7 +53,7 @@ class MRD(model):
             self.names = kwargs['names']
             del kwargs['names']
         else:
-            self.names = ["{}".format(i + 1) for i in range(len(Ylist))]
+            self.names = ["{}".format(i + 1) for i in range(len(likelihood_list))]
         if kwargs.has_key('kernel'):
             kernel = kwargs['kernel']
             k = lambda: kernel.copy()
@@ -80,9 +81,10 @@ class MRD(model):
             self.M = 10

         self._init = True
-        X = self._init_X(initx, Ylist)
+        X = self._init_X(initx, likelihood_list)
         Z = self._init_Z(initz, X)
-        self.bgplvms = [Bayesian_GPLVM(Y, kernel=k(), X=X, Z=Z, M=self.M, **kwargs) for Y in Ylist]
+        self.bgplvms = [Bayesian_GPLVM(Y, kernel=k(), X=X, Z=Z, M=self.M, **kwargs) for Y in likelihood_list]

         del self._init

         self.gref = self.bgplvms[0]
@@ -126,11 +128,11 @@ class MRD(model):
         if not self._init:
             raise AttributeError("bgplvm list not initialized")

     @property
-    def Ylist(self):
+    def likelihood_list(self):
         return [g.likelihood.Y for g in self.bgplvms]

-    @Ylist.setter
-    def Ylist(self, Ylist):
-        for g, Y in itertools.izip(self.bgplvms, Ylist):
+    @likelihood_list.setter
+    def likelihood_list(self, likelihood_list):
+        for g, Y in itertools.izip(self.bgplvms, likelihood_list):
             g.likelihood.Y = Y

     @property
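Note that the renamed property still returns the raw data matrices (g.likelihood.Y), not the likelihood objects themselves, and the setter writes new data into each model's existing likelihood pairwise. In use:

    Ys = m.likelihood_list    # [g.likelihood.Y for g in m.bgplvms]
    m.likelihood_list = Ys    # assigns g.likelihood.Y model by model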
@@ -152,7 +154,7 @@ class MRD(model):

     def randomize(self, initx='concat', initz='permute', *args, **kw):
         super(MRD, self).randomize(*args, **kw)
-        self._init_X(initx, self.Ylist)
+        self._init_X(initx, self.likelihood_list)
         self._init_Z(initz, self.X)

     def _get_param_names(self):
@@ -225,6 +227,10 @@ class MRD(model):
 #             g._computations()

+    def update_likelihood_approximation(self): #TODO: object-oriented vs script-based
+        for bgplvm in self.bgplvms:
+            bgplvm.update_likelihood_approximation()
+
     def log_likelihood(self):
         ll = -self.gref.KL_divergence()
         for g in self.bgplvms:
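This forwarding method is what lets EP run inside MRD: each wrapped BGPLVM defers to sparse_GP.update_likelihood_approximation (third file below). A typical fitting loop would alternate the approximation update with hyperparameter optimisation; a hedged sketch (the optimize call and iteration count are placeholders, not part of this commit):

    for it in range(10):                     # alternate until converged
        m.update_likelihood_approximation()  # refit the likelihood approximation
        m.optimize()                         # then optimise the hyperparameters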
@@ -246,17 +252,18 @@ class MRD(model):
                                      partial=g.partial_for_likelihood)]) \
                         for g in self.bgplvms])))

-    def _init_X(self, init='PCA', Ylist=None):
-        if Ylist is None:
-            Ylist = self.Ylist
+    def _init_X(self, init='PCA', likelihood_list=None):
+        if likelihood_list is None:
+            likelihood_list = self.likelihood_list
         if init in "PCA_single":
-            X = numpy.zeros((Ylist[0].shape[0], self.Q))
-            for qs, Y in itertools.izip(numpy.array_split(numpy.arange(self.Q), len(Ylist)), Ylist):
-                X[:, qs] = PCA(Y, len(qs))[0]
+            X = numpy.zeros((likelihood_list[0].Y.shape[0], self.Q))
+            for qs, Y in itertools.izip(numpy.array_split(numpy.arange(self.Q), len(likelihood_list)), likelihood_list):
+                X[:, qs] = PCA(Y.Y, len(qs))[0]
         elif init in "PCA_concat":
-            X = PCA(numpy.hstack(Ylist), self.Q)[0]
+            X = PCA(numpy.hstack([l.Y for l in likelihood_list]), self.Q)[0]
+            #X = PCA(numpy.hstack(likelihood_list), self.Q)[0]
         else: # init == 'random':
-            X = numpy.random.randn(Ylist[0].shape[0], self.Q)
+            X = numpy.random.randn(likelihood_list[0].Y.shape[0], self.Q)
         self.X = X
         return X
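For 'PCA_single', the Q latent dimensions are partitioned across datasets and each slice is initialised from a per-dataset PCA of likelihood.Y. The partitioning step in isolation (plain numpy):

    import numpy as np

    Q, n_datasets = 6, 2
    splits = np.array_split(np.arange(Q), n_datasets)
    # splits == [array([0, 1, 2]), array([3, 4, 5])]
    # X[:, splits[i]] is then filled with a PCA of dataset i's likelihood.Y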
@@ -294,8 +301,8 @@ class MRD(model):
         fig = self._handle_plotting(fig_num, axes, lambda i, g, ax: ax.imshow(g.X))
         return fig

-    def plot_predict(self, fig_num="MRD Predictions", axes=None):
-        fig = self._handle_plotting(fig_num, axes, lambda i, g, ax: ax.imshow(g.predict(g.X)[0]))
+    def plot_predict(self, fig_num="MRD Predictions", axes=None, **kwargs):
+        fig = self._handle_plotting(fig_num, axes, lambda i, g, ax: ax.imshow(g.predict(g.X)[0], **kwargs))
         return fig

     def plot_scales(self, fig_num="MRD Scales", axes=None, *args, **kwargs):
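The new **kwargs are forwarded verbatim to ax.imshow, so colour mapping can now be controlled from the call site, for example:

    m.plot_predict(cmap='gray', vmin=-2.0, vmax=2.0)  # standard imshow keywords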
sparse_GP (changed file 3 of 3):

@@ -8,6 +8,7 @@ from ..util.plot import gpplot
 from .. import kern
 from GP import GP
 from scipy import linalg
+from ..likelihoods import Gaussian

 class sparse_GP(GP):
     """
@@ -172,19 +173,19 @@ class sparse_GP(GP):
         For a Gaussian likelihood, no iteration is required:
         this function does nothing
         """
-        if self.has_uncertain_inputs:
-            Lmi = chol_inv(self.Lm)
-            Kmmi = tdot(Lmi.T)
-            diag_tr_psi2Kmmi = np.array([np.trace(psi2_Kmmi) for psi2_Kmmi in np.dot(self.psi2, Kmmi)])
-
-            self.likelihood.fit_FITC(self.Kmm, self.psi1, diag_tr_psi2Kmmi) # this uses the fit_FITC code, but does not perform a FITC-EP #TODO: solve potential confusion
-            #raise NotImplementedError, "EP approximation not implemented for uncertain inputs"
-        else:
-            self.likelihood.fit_DTC(self.Kmm, self.psi1)
-            # self.likelihood.fit_FITC(self.Kmm, self.psi1, self.psi0)
-        self._set_params(self._get_params()) # update the GP
+        if not isinstance(self.likelihood, Gaussian): # updates not needed for a Gaussian likelihood
+            self.likelihood.restart() #TODO: check consistency with pseudo_EP
+            if self.has_uncertain_inputs:
+                Lmi = chol_inv(self.Lm)
+                Kmmi = tdot(Lmi.T)
+                diag_tr_psi2Kmmi = np.array([np.trace(psi2_Kmmi) for psi2_Kmmi in np.dot(self.psi2, Kmmi)])
+
+                self.likelihood.fit_FITC(self.Kmm, self.psi1, diag_tr_psi2Kmmi) # this uses the fit_FITC code, but does not perform a FITC-EP #TODO: solve potential confusion
+                #raise NotImplementedError, "EP approximation not implemented for uncertain inputs"
+            else:
+                self.likelihood.fit_DTC(self.Kmm, self.psi1)
+                # self.likelihood.fit_FITC(self.Kmm, self.psi1, self.psi0)
+            self._set_params(self._get_params()) # update the GP

     def _log_likelihood_gradients(self):
         return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))
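The restructured method makes the Gaussian case an explicit no-op, matching the docstring, and otherwise restarts EP before refitting the DTC (certain inputs) or FITC-style (uncertain inputs) approximation. Condensed, the new control flow reads (arguments elided):

    def update_likelihood_approximation(self):
        if isinstance(self.likelihood, Gaussian):
            return                            # exact Gaussian case: nothing to do
        self.likelihood.restart()             # reset the EP site parameters
        if self.has_uncertain_inputs:
            self.likelihood.fit_FITC(...)     # psi-statistics route (BGPLVM/MRD)
        else:
            self.likelihood.fit_DTC(...)      # standard sparse-GP route
        self._set_params(self._get_params())  # propagate the update into the GP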