Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-01 07:46:22 +02:00
beginnings of gplvm
parent 7bb6f4ba4e
commit a8c760932e
1 changed file with 22 additions and 29 deletions
```diff
@@ -15,61 +15,54 @@ class GPLVM(GP):
     """
     Gaussian Process Latent Variable Model

     :param Y: observed data
     :type Y: np.ndarray
     :param input_dim: latent dimensionality
     :type input_dim: int
     :param init: initialisation method for the latent space
     :type init: 'PCA'|'random'
     """
     def __init__(self, Y, input_dim, init='PCA', X=None, kernel=None, normalize_Y=False, name="gplvm"):
         """
         :param Y: observed data
         :type Y: np.ndarray
         :param input_dim: latent dimensionality
         :type input_dim: int
         :param init: initialisation method for the latent space
         :type init: 'PCA'|'random'
         """
         if X is None:
             X = self.initialise_latent(init, input_dim, Y)
         if kernel is None:
             kernel = kern.rbf(input_dim, ARD=input_dim > 1) + kern.bias(input_dim, np.exp(-2))
-        likelihood = Gaussian(Y, normalize=normalize_Y, variance=np.exp(-2.))
-        GP.__init__(self, X, likelihood, kernel, normalize_X=False, name=name)
-        self.X = Param('q_mean', self.X)
-        self.add_parameter(self.X, gradient=self.dK_dX, index=0)
-        self.ensure_default_constraints()
+        likelihood = Gaussian()
+        super(GPLVM, self).__init__(X, Y, kernel, likelihood, name='GPLVM')
+        self.X = Param('X', X)
+        self.add_parameter(self.X, index=0)

     def initialise_latent(self, init, input_dim, Y):
         Xr = np.random.randn(Y.shape[0], input_dim)
         if init == 'PCA':
             PC = PCA(Y, input_dim)[0]
             Xr[:PC.shape[0], :PC.shape[1]] = PC
         else:
             raise NotImplementedError
         return Xr

+    def parameters_changed(self):
+        GP.parameters_changed(self)
+        self.X.gradient = self.kern.gradients_X(self.posterior.dL_dK, self.X)

     def _getstate(self):
         return GP._getstate(self)

     def _setstate(self, state):
         GP._setstate(self, state)

     # def _get_param_names(self):
     #     return sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], []) + GP._get_param_names(self)
     #
     # def _get_params(self):
     #     return np.hstack((self.X.flatten(), GP._get_params(self)))
     #
     # def _set_params(self, x):
     #     self.X = x[:self.num_data * self.input_dim].reshape(self.num_data, self.input_dim).copy()
     #     GP._set_params(self, x[self.X.size:])

-    def dK_dX(self):
-        return self.kern.dK_dX(self.dL_dK, self.X)
     # def _log_likelihood_gradients(self):
     #     dL_dX = self.kern.dK_dX(self.dL_dK, self.X)
     #
     #     return np.hstack((dL_dX.flatten(), GP._log_likelihood_gradients(self)))

     def jacobian(self, X):
         target = np.zeros((X.shape[0], X.shape[1], self.output_dim))
         for i in range(self.output_dim):
             target[:, :, i] = self.kern.dK_dX(np.dot(self.Ki, self.likelihood.Y[:, i])[None, :], X, self.X)
         return target

     def magnification(self, X):
         target = np.zeros(X.shape[0])
         # J = np.zeros((X.shape[0], X.shape[1], self.output_dim))
```
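For orientation, a minimal usage sketch of the model this commit is building toward, assuming the finished GPy API (`GPy.models.GPLVM` and `optimize()`); the data here is synthetic:

```python
import numpy as np
import GPy

Y = np.random.randn(100, 8)            # toy observed data: 100 points in 8-D
m = GPy.models.GPLVM(Y, input_dim=2)   # latent space initialised via PCA
m.optimize()                           # optimise X and kernel hyperparameters
print(m.X.shape)                       # (100, 2) learned latent coordinates
```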
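`initialise_latent` seeds the latent points with the top principal components of `Y`, keeping Gaussian noise anywhere the projection does not reach. A standalone sketch of that 'PCA' branch, with GPy's `PCA` helper replaced by a plain SVD; the name `pca_latent_init` is illustrative, not GPy's:

```python
import numpy as np

def pca_latent_init(Y, input_dim):
    # Centre the data and project it onto its top principal components.
    Yc = Y - Y.mean(axis=0)
    U, S, Vt = np.linalg.svd(Yc, full_matrices=False)
    PC = U[:, :input_dim] * S[:input_dim]      # PC scores, (N, input_dim)
    # Start from noise so anything the projection leaves uncovered stays
    # randomised, mirroring initialise_latent above.
    Xr = np.random.randn(Y.shape[0], input_dim)
    Xr[:PC.shape[0], :PC.shape[1]] = PC
    return Xr

Y = np.random.randn(50, 5)                     # toy observed data
X0 = pca_latent_init(Y, input_dim=2)           # (50, 2) latent start
```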
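The new `parameters_changed` hook moves the latent gradient out of the old `dK_dX` callback: `kern.gradients_X(dL_dK, X)` applies the chain rule dL/dX = dL/dK · dK/dX. A self-contained sketch of that computation for a plain RBF kernel (`rbf_K` and `rbf_gradients_X` are illustrative stand-ins, not GPy calls), verified with a central finite difference:

```python
import numpy as np

def rbf_K(X, lengthscale=1.0, variance=1.0):
    # Squared-exponential kernel matrix K = k(X, X) on latent points X (N x Q).
    sq = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
    return variance * np.exp(-0.5 * sq / lengthscale ** 2)

def rbf_gradients_X(dL_dK, X, lengthscale=1.0, variance=1.0):
    # dL/dx_n = sum_m (dL_dK[n,m] + dL_dK[m,n]) * dK[n,m]/dx_n, where
    # dK[n,m]/dx_n = -K[n,m] * (x_n - x_m) / lengthscale**2 for the RBF,
    # i.e. sum_m W[n,m] * (x_m - x_n) with the weights W below.
    W = (dL_dK + dL_dK.T) * rbf_K(X, lengthscale, variance) / lengthscale ** 2
    return W @ X - W.sum(axis=1)[:, None] * X

# Finite-difference check for the scalar objective L = sum(K**2).
X = np.random.randn(6, 2)
g = rbf_gradients_X(2 * rbf_K(X), X)           # dL/dK = 2K for this L
eps = 1e-5
Xp, Xm = X.copy(), X.copy()
Xp[0, 0] += eps
Xm[0, 0] -= eps
fd = (np.sum(rbf_K(Xp) ** 2) - np.sum(rbf_K(Xm) ** 2)) / (2 * eps)
assert np.allclose(g[0, 0], fd)
```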
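The diff is cut off inside `magnification`, so its actual body is not recoverable here. As an illustration only: given the `(num_points, input_dim, output_dim)` Jacobian stack that `jacobian` returns, the standard magnification factor is the root determinant of the expected metric G_n = J_n J_n^T at each point (the helper name and shape convention below are assumptions):

```python
import numpy as np

def magnification_from_jacobian(J):
    # J[n, q, d] = d mean_d / d X[n, q], shape (num_points, input_dim, output_dim).
    # G_n = J_n @ J_n.T is the (input_dim x input_dim) metric tensor at point n;
    # sqrt(det(G_n)) measures how the mapping stretches the latent space locally.
    G = np.einsum('nqd,npd->nqp', J, J)        # per-point metric tensors
    return np.sqrt(np.linalg.det(G))           # (num_points,) magnifications

J = np.random.randn(10, 2, 5)                  # toy Jacobian stack
m = magnification_from_jacobian(J)             # one value per test point
```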