Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-06 02:24:17 +02:00
made the BGPLVM work in the new world order
Commit 2b756e96e1 (parent f7d2fc6ca4)
3 changed files with 22 additions and 12 deletions
BGPLVM.py

@@ -5,10 +5,12 @@ import numpy as np
 import pylab as pb
 import sys, pdb
 from GPLVM import GPLVM
-from sparse_GP_regression import sparse_GP_regression
+from sparse_GP import sparse_GP
 from GPy.util.linalg import pdinv
+from ..likelihoods import Gaussian
+from .. import kern

-class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
+class Bayesian_GPLVM(sparse_GP, GPLVM):
     """
     Bayesian Gaussian Process Latent Variable Model

@@ -20,15 +22,23 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
     :type init: 'PCA'|'random'

     """
-    def __init__(self, Y, Q, init='PCA', **kwargs):
+    def __init__(self, Y, Q, init='PCA', M=10, Z=None, **kwargs):
         X = self.initialise_latent(init, Q, Y)

+        if Z is None:
+            Z = np.random.permutation(X.copy())[:M]
+        else:
+            assert Z.shape[1]==X.shape[1]
+
+        kernel = kern.rbf(Q) + kern.white(Q)
+
         S = np.ones_like(X) * 1e-2#
-        sparse_GP_regression.__init__(self, X, Y, X_uncertainty = S, **kwargs)
+        sparse_GP.__init__(self, X, Gaussian(Y), X_uncertainty = S, Z=Z,**kwargs)

     def _get_param_names(self):
         X_names = sum([['X_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
         S_names = sum([['S_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
-        return (X_names + S_names + sparse_GP_regression._get_param_names(self))
+        return (X_names + S_names + sparse_GP._get_param_names(self))

     def _get_params(self):
         """
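The constructor hunk above defaults to choosing the M inducing inputs as a random subset of the initialised latent points. A minimal standalone sketch of that idiom, with placeholder names (X_init, N, Q, M are illustrative, not part of the commit):

import numpy as np

# Stand-in for the PCA-initialised latent points, one row per data point (N x Q).
N, Q, M = 100, 2, 10
X_init = np.random.randn(N, Q)

# Same idiom as the diff: shuffle the rows and keep the first M as inducing inputs Z.
Z = np.random.permutation(X_init.copy())[:M]
assert Z.shape == (M, Q)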
@@ -40,13 +50,13 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
         ===============================================================

         """
-        return np.hstack((self.X.flatten(), self.X_uncertainty.flatten(), sparse_GP_regression._get_params(self)))
+        return np.hstack((self.X.flatten(), self.X_uncertainty.flatten(), sparse_GP._get_params(self)))

     def _set_params(self,x):
         N, Q = self.N, self.Q
         self.X = x[:self.X.size].reshape(N,Q).copy()
         self.X_uncertainty = x[(N*Q):(2*N*Q)].reshape(N,Q).copy()
-        sparse_GP_regression._set_params(self, x[(2*N*Q):])
+        sparse_GP._set_params(self, x[(2*N*Q):])

     def dL_dmuS(self):
         dL_dmu_psi0, dL_dS_psi0 = self.kern.dpsi1_dmuS(self.dL_dpsi1,self.Z,self.X,self.X_uncertainty)
@@ -58,5 +68,5 @@ class Bayesian_GPLVM(sparse_GP_regression, GPLVM):
         return np.hstack((dL_dmu.flatten(), dL_dS.flatten()))

     def _log_likelihood_gradients(self):
-        return np.hstack((self.dL_dmuS().flatten(), sparse_GP_regression._log_likelihood_gradients(self)))
+        return np.hstack((self.dL_dmuS().flatten(), sparse_GP._log_likelihood_gradients(self)))
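The _get_params/_set_params hunks above pack the latent means first, then the per-point variances, then the underlying sparse_GP parameters. A small self-contained sketch of that packing and unpacking convention (shapes and the `rest` placeholder are illustrative only):

import numpy as np

N, Q = 4, 2
X = np.random.randn(N, Q)                # latent means
X_uncertainty = np.ones((N, Q)) * 1e-2   # latent variances
rest = np.arange(3.0)                    # stand-in for the remaining sparse_GP parameters

# Packing order used by _get_params: X, then X_uncertainty, then everything else.
x = np.hstack((X.flatten(), X_uncertainty.flatten(), rest))

# Unpacking mirrors _set_params: the first N*Q entries are X, the next N*Q are X_uncertainty.
X_back = x[:N * Q].reshape(N, Q)
S_back = x[N * Q:2 * N * Q].reshape(N, Q)
assert np.allclose(X_back, X) and np.allclose(S_back, X_uncertainty)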
__init__.py (models package)

@@ -9,6 +9,6 @@ from sparse_GP_regression import sparse_GP_regression
 from GPLVM import GPLVM
 from warped_GP import warpedGP
 # TODO: from generalized_FITC import generalized_FITC
-#from sparse_GPLVM import sparse_GPLVM
+from sparse_GPLVM import sparse_GPLVM
 #from uncollapsed_sparse_GP import uncollapsed_sparse_GP
-#from BGPLVM import Bayesian_GPLVM
+from BGPLVM import Bayesian_GPLVM
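With the BGPLVM import re-enabled, the class becomes reachable through the models package (assumed here to be GPy/models/__init__.py). A hedged usage sketch of the new constructor signature; the data is synthetic, and the optimiser call is an assumption about the base model class rather than something this commit shows:

import numpy as np
import GPy

# Synthetic data: 50 observations of dimension 5, to be embedded in a 2-dimensional latent space.
Y = np.random.randn(50, 5)

# Signature introduced by this commit: M inducing inputs, or a user-supplied Z with Q columns.
m = GPy.models.Bayesian_GPLVM(Y, Q=2, init='PCA', M=10)

# If the base model class exposes a gradient-based optimiser (as in later GPy releases):
# m.optimize()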
sparse_GP.py

@@ -65,7 +65,7 @@ class sparse_GP(GP):
         # kernel computations, using BGPLVM notation
         self.Kmm = self.kern.K(self.Z)
         if self.has_uncertain_inputs:
-            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncerTainty)
+            self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty)
             self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
             self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty)
             if self.likelihood.is_heteroscedastic:
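The psi0/psi1/psi2 terms used above are expectations of kernel values under the uncertain inputs q(X) = N(X; mu, S); GPy evaluates them in closed form per kernel. As a purely illustrative cross-check, not the library's implementation, psi1 can be approximated by Monte Carlo sampling of the latent points (argument order mirrors kern.psi1(Z, X, X_uncertainty) in the diff):

import numpy as np

def rbf(A, B, variance=1.0, lengthscale=1.0):
    # Plain RBF kernel between the rows of A and the rows of B.
    sqdist = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return variance * np.exp(-0.5 * sqdist / lengthscale ** 2)

def psi1_monte_carlo(Z, mu, S, n_samples=2000):
    # psi1[n, m] = E_{x_n ~ N(mu_n, diag(S_n))}[ k(x_n, z_m) ], estimated by sampling.
    N, Q = mu.shape
    acc = np.zeros((N, Z.shape[0]))
    for _ in range(n_samples):
        X = mu + np.sqrt(S) * np.random.randn(N, Q)
        acc += rbf(X, Z)
    return acc / n_samples

# Toy shapes: 5 latent points, 3 inducing inputs, Q = 2.
mu = np.random.randn(5, 2)
S = np.ones((5, 2)) * 1e-2
Z = np.random.randn(3, 2)
print(psi1_monte_carlo(Z, mu, S).shape)   # (5, 3)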