Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-09 03:52:39 +02:00

commit bbc7bd8aca (parent 687631f719)

    various merge conflicts from the newGP branch

4 changed files with 18 additions and 18 deletions
@@ -17,7 +17,7 @@ K = k.K(X)
 Y = np.random.multivariate_normal(np.zeros(N),K,D).T

 # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-k = GPy.kern.linear(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
+k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
 m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M)
 m.constrain_positive('(rbf|bias|noise|white|S)')
 # m.constrain_fixed('S', 1)
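The demo now draws its working kernel from an RBF rather than a linear term. For orientation, a minimal sketch of the surrounding script after this change; the sizes N, D, Q, M are illustrative assumptions, since their original values lie outside this hunk:

import numpy as np
import GPy

N, D, Q, M = 100, 5, 3, 10   # assumed sizes, not shown in the diff
X = np.random.randn(N, Q)

# sample toy data from a GP prior, as in the lines above the hunk
k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, D).T

# the working kernel is now RBF-based instead of linear
k = GPy.kern.rbf(Q, ARD=False) + GPy.kern.white(Q, 0.00001)
m = GPy.models.Bayesian_GPLVM(Y, Q, kernel=k, M=M)
m.constrain_positive('(rbf|bias|noise|white|S)')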
@@ -46,7 +46,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         The resulting 1-D array has this structure:

         ===============================================================
-        | mu | S | Z | beta | theta |
+        | mu | S | Z | theta | beta |
         ===============================================================

         """
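This hunk only touches documentation: in the flattened parameter vector, the kernel hyperparameters theta now sit before the noise precision beta. A hypothetical slicing helper to make the documented layout concrete; the shapes (mu and S as N x Q variational parameters, Z as M x Q inducing inputs, scalar beta) follow standard Bayesian GPLVM notation and are assumptions, not confirmed by the diff:

import numpy as np

def split_params(x, N, Q, M, num_theta):
    # hypothetical helper mirroring the documented layout:
    # | mu | S | Z | theta | beta |
    i = 0
    mu = x[i:i + N * Q].reshape(N, Q); i += N * Q
    S = x[i:i + N * Q].reshape(N, Q);  i += N * Q
    Z = x[i:i + M * Q].reshape(M, Q);  i += M * Q
    theta = x[i:i + num_theta];        i += num_theta
    beta = x[i]                        # assumed scalar precision
    return mu, S, Z, theta, beta

N, Q, M, num_theta = 5, 2, 3, 2
x = np.random.randn(2 * N * Q + M * Q + num_theta + 1)
mu, S, Z, theta, beta = split_params(x, N, Q, M, num_theta)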
@@ -8,9 +8,10 @@ import sys, pdb
 from .. import kern
 from ..core import model
 from ..util.linalg import pdinv, PCA
-from GP_regression import GP_regression
+from GP import GP
+from ..likelihoods import Gaussian

-class GPLVM(GP_regression):
+class GPLVM(GP):
     """
     Gaussian Process Latent Variable Model

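Note the import style in the replacement line: "from GP import GP" is a Python 2 implicit relative import, and it is hunk-accurate as written. For reference, under Python 3 semantics the same sibling-module import would need to be explicit (assuming GP.py lives in the same package):

# as in the diff (Python 2 implicit relative import):
from GP import GP

# explicit-relative equivalent under Python 3 semantics:
from .GP import GP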
@@ -22,10 +23,13 @@ class GPLVM(GP_regression):
     :type init: 'PCA'|'random'

     """
-    def __init__(self, Y, Q, init='PCA', X = None, **kwargs):
+    def __init__(self, Y, Q, init='PCA', X = None, kernel=None, **kwargs):
         if X is None:
             X = self.initialise_latent(init, Q, Y)
-        GP_regression.__init__(self, X, Y, **kwargs)
+        if kernel is None:
+            kernel = kern.rbf(Q) + kern.bias(Q)
+        likelihood = Gaussian(Y)
+        GP.__init__(self, X, likelihood, kernel, **kwargs)

     def initialise_latent(self, init, Q, Y):
         if init == 'PCA':
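The constructor now builds its own default kernel (rbf plus bias over the Q latent dimensions) and wraps Y in a Gaussian likelihood before delegating to GP.__init__. A hedged usage sketch, assuming GPLVM is exposed under GPy.models alongside GP and Bayesian_GPLVM, with illustrative data shapes:

import numpy as np
import GPy

Y = np.random.randn(40, 5)   # assumed observations: N=40 points, D=5 output dimensions
Q = 2                        # latent dimensionality

# default path: kern.rbf(Q) + kern.bias(Q) is constructed internally
m = GPy.models.GPLVM(Y, Q)

# or supply a kernel through the new keyword argument
k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
m = GPy.models.GPLVM(Y, Q, init='PCA', kernel=k)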
@@ -34,23 +38,19 @@ class GPLVM(GP_regression):
         return np.random.randn(Y.shape[0], Q)

     def _get_param_names(self):
-        return (sum([['X_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
-                + self.kern._get_param_names_transformed())
+        return sum([['X_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[]) + GP._get_param_names(self)

     def _get_params(self):
-        return np.hstack((self.X.flatten(), self.kern._get_params_transformed()))
+        return np.hstack((self.X.flatten(), GP._get_params(self)))

     def _set_params(self,x):
         self.X = x[:self.X.size].reshape(self.N,self.Q).copy()
-        GP_regression._set_params(self, x[self.X.size:])
+        GP._set_params(self, x[self.X.size:])

     def _log_likelihood_gradients(self):
-        dL_dK = self.dL_dK()
-
-        dL_dtheta = self.kern.dK_dtheta(dL_dK,self.X)
-        dL_dX = 2*self.kern.dK_dX(dL_dK,self.X)
-
-        return np.hstack((dL_dX.flatten(),dL_dtheta))
+        dL_dX = 2.*self.kern.dK_dX(self.dL_dK,self.X)
+
+        return np.hstack((dL_dX.flatten(),GP._log_likelihood_gradients(self)))

     def plot(self):
         assert self.Y.shape[1]==2
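After this hunk, GPLVM simply prepends its latent-X block to whatever the GP base class manages, for parameter names, values, and gradients alike; the factor of 2 (now 2.) in dL_dX accounts for X appearing in both arguments of K(X, X). A sketch of the round trip this packing implies, using only methods and attributes visible in the diff (data shapes assumed):

import numpy as np
import GPy

Y = np.random.randn(30, 4)       # assumed observations
m = GPy.models.GPLVM(Y, 2)

x = m._get_params()              # [X.flatten() | GP-managed parameters]
assert x[:m.X.size].reshape(m.N, m.Q).shape == m.X.shape
m._set_params(x)                 # round trip should leave the model unchanged

g = m._log_likelihood_gradients()   # [dL/dX.flatten() | GP gradient block]
assert g.shape == x.shape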
@@ -161,7 +161,7 @@ class GradientTests(unittest.TestCase):
         kernel = GPy.kern.rbf(1)
         distribution = GPy.likelihoods.likelihood_functions.probit()
         likelihood = GPy.likelihoods.EP(Y, distribution)
-        m = GPy.models.GP(X,kernel,likelihood=likelihood)
+        m = GPy.models.GP(X, likelihood, kernel)
         m.ensure_default_constraints()
         self.assertTrue(m.EPEM)

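The test now exercises the new positional signature GP(X, likelihood, kernel), with the likelihood constructed up front rather than passed as a keyword. A condensed sketch of the updated test body; the contents of X and Y are assumptions, since they are defined outside this hunk:

import numpy as np
import GPy

X = np.random.randn(20, 1)                         # assumed inputs
Y = np.where(np.random.rand(20, 1) > 0.5, 1, -1)   # assumed binary labels for the probit/EP setup

kernel = GPy.kern.rbf(1)
distribution = GPy.likelihoods.likelihood_functions.probit()
likelihood = GPy.likelihoods.EP(Y, distribution)
m = GPy.models.GP(X, likelihood, kernel)           # new argument order
m.ensure_default_constraints()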