Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-12 05:22:38 +02:00)
Replaced Q by input_dim

parent 312cfebcb1
commit 97f3357b6d

22 changed files with 271 additions and 271 deletions
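The change is a mechanical rename: the argument that used to be spelled `Q` (the latent-space dimensionality) is spelled `input_dim` after this commit, in kernel constructors and model constructors alike. A minimal sketch of the new spelling, assembled from the test code in the diffs below:

import numpy as np
import GPy

# After this commit the latent dimensionality is passed as `input_dim`
# (formerly `Q`) to kernels and models alike.
N, M, input_dim, D = 10, 3, 2, 4

# Sample toy data from an rbf kernel, exactly as the tests below do.
k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
X = np.random.rand(N, input_dim)
Y = np.random.multivariate_normal(np.zeros(N), k.K(X), D).T
Y -= Y.mean(axis=0)

# The second positional argument of the model is the renamed one.
m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, M=M)
m.ensure_default_constraints()
m.randomize()
assert m.checkgrad()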
@@ -7,67 +7,67 @@ import GPy

 class BGPLVMTests(unittest.TestCase):
     def test_bias_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     def test_linear_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.linear(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     def test_rbf_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     def test_rbf_bias_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     #@unittest.skip('psi2 cross terms are NotImplemented for this combination')
     def test_linear_bias_kern(self):
-        N, M, Q, D = 30, 5, 4, 30
-        X = np.random.rand(N, Q)
-        k = GPy.kern.linear(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 30, 5, 4, 30
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.linear(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -7,37 +7,37 @@ import GPy

 class GPLVMTests(unittest.TestCase):
     def test_bias_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        k = GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.GPLVM(Y, Q, kernel = k)
+        k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.GPLVM(Y, input_dim, kernel = k)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     def test_linear_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        k = GPy.kern.linear(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.GPLVM(Y, Q, kernel = k)
+        k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.GPLVM(Y, input_dim, kernel = k)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     def test_rbf_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.GPLVM(Y, Q, kernel = k)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.GPLVM(Y, input_dim, kernel = k)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -14,16 +14,16 @@ class MRDTests(unittest.TestCase):

     def test_gradients(self):
         num_m = 3
-        N, M, Q, D = 20, 8, 6, 20
-        X = np.random.rand(N, Q)
+        N, M, input_dim, D = 20, 8, 6, 20
+        X = np.random.rand(N, input_dim)

-        k = GPy.kern.linear(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q)
+        k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim)
         K = k.K(X)

         Ylist = [np.random.multivariate_normal(np.zeros(N), K, D).T for _ in range(num_m)]
         likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist]

-        m = GPy.models.MRD(likelihood_list, Q=Q, kernels=k, M=M)
+        m = GPy.models.MRD(likelihood_list, input_dim=input_dim, kernels=k, M=M)
         m.ensure_default_constraints()

         self.assertTrue(m.checkgrad())

@@ -16,25 +16,25 @@ class PsiStatModel(model):
         self.X = X
         self.X_variance = X_variance
         self.Z = Z
-        self.N, self.Q = X.shape
-        self.M, Q = Z.shape
-        assert self.Q == Q, "shape missmatch: Z:{!s} X:{!s}".format(Z.shape, X.shape)
+        self.N, self.input_dim = X.shape
+        self.M, input_dim = Z.shape
+        assert self.input_dim == input_dim, "shape missmatch: Z:{!s} X:{!s}".format(Z.shape, X.shape)
         self.kern = kernel
         super(PsiStatModel, self).__init__()
         self.psi_ = self.kern.__getattribute__(self.which)(self.Z, self.X, self.X_variance)
     def _get_param_names(self):
-        Xnames = ["{}_{}_{}".format(what, i, j) for what, i, j in itertools.product(['X', 'X_variance'], range(self.N), range(self.Q))]
-        Znames = ["Z_{}_{}".format(i, j) for i, j in itertools.product(range(self.M), range(self.Q))]
+        Xnames = ["{}_{}_{}".format(what, i, j) for what, i, j in itertools.product(['X', 'X_variance'], range(self.N), range(self.input_dim))]
+        Znames = ["Z_{}_{}".format(i, j) for i, j in itertools.product(range(self.M), range(self.input_dim))]
         return Xnames + Znames + self.kern._get_param_names()
     def _get_params(self):
         return numpy.hstack([self.X.flatten(), self.X_variance.flatten(), self.Z.flatten(), self.kern._get_params()])
     def _set_params(self, x, save_old=True, save_count=0):
         start, end = 0, self.X.size
-        self.X = x[start:end].reshape(self.N, self.Q)
+        self.X = x[start:end].reshape(self.N, self.input_dim)
         start, end = end, end + self.X_variance.size
-        self.X_variance = x[start: end].reshape(self.N, self.Q)
+        self.X_variance = x[start: end].reshape(self.N, self.input_dim)
         start, end = end, end + self.Z.size
-        self.Z = x[start: end].reshape(self.M, self.Q)
+        self.Z = x[start: end].reshape(self.M, self.input_dim)
         self.kern._set_params(x[end:])
     def log_likelihood(self):
         return self.kern.__getattribute__(self.which)(self.Z, self.X, self.X_variance).sum()

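The `_get_params`/`_set_params` pair in the hunk above is the standard packing scheme used throughout these models: every array is flattened and concatenated into a single parameter vector, then recovered with running (start, end) slices and `reshape` in the same order. A self-contained round-trip sketch in plain numpy, mirroring the shapes above (an illustration, not GPy code):

import numpy as np

N, M, input_dim = 50, 10, 5
X = np.random.randn(N, input_dim)
X_variance = np.random.rand(N, input_dim)
Z = np.random.randn(M, input_dim)

# Pack: flatten each array and concatenate in a fixed order.
params = np.hstack([X.flatten(), X_variance.flatten(), Z.flatten()])

# Unpack: walk the vector with running (start, end) slices and reshape.
start, end = 0, X.size
X2 = params[start:end].reshape(N, input_dim)
start, end = end, end + X_variance.size
X_var2 = params[start:end].reshape(N, input_dim)
start, end = end, end + Z.size
Z2 = params[start:end].reshape(M, input_dim)

# The round trip is exact.
assert (X2 == X).all() and (X_var2 == X_variance).all() and (Z2 == Z).all()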
@@ -43,24 +43,24 @@ class PsiStatModel(model):
         try:
             psiZ = self.kern.__getattribute__("d" + self.which + "_dZ")(numpy.ones_like(self.psi_), self.Z, self.X, self.X_variance)
         except AttributeError:
-            psiZ = numpy.zeros(self.M * self.Q)
+            psiZ = numpy.zeros(self.M * self.input_dim)
         thetagrad = self.kern.__getattribute__("d" + self.which + "_dtheta")(numpy.ones_like(self.psi_), self.Z, self.X, self.X_variance).flatten()
         return numpy.hstack((psimu.flatten(), psiS.flatten(), psiZ.flatten(), thetagrad))

 class DPsiStatTest(unittest.TestCase):
-    Q = 5
+    input_dim = 5
     N = 50
     M = 10
     D = 20
-    X = numpy.random.randn(N, Q)
+    X = numpy.random.randn(N, input_dim)
     X_var = .5 * numpy.ones_like(X) + .4 * numpy.clip(numpy.random.randn(*X.shape), 0, 1)
     Z = numpy.random.permutation(X)[:M]
-    Y = X.dot(numpy.random.randn(Q, D))
-    # kernels = [GPy.kern.linear(Q, ARD=True, variances=numpy.random.rand(Q)), GPy.kern.rbf(Q, ARD=True), GPy.kern.bias(Q)]
+    Y = X.dot(numpy.random.randn(input_dim, D))
+    # kernels = [GPy.kern.linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)), GPy.kern.rbf(input_dim, ARD=True), GPy.kern.bias(input_dim)]

-    kernels = [GPy.kern.linear(Q), GPy.kern.rbf(Q), GPy.kern.bias(Q),
-               GPy.kern.linear(Q) + GPy.kern.bias(Q),
-               GPy.kern.rbf(Q) + GPy.kern.bias(Q)]
+    kernels = [GPy.kern.linear(input_dim), GPy.kern.rbf(input_dim), GPy.kern.bias(input_dim),
+               GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim),
+               GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim)]

     def testPsi0(self):
         for k in self.kernels:

@@ -108,31 +108,31 @@ if __name__ == "__main__":
     import sys
     interactive = 'i' in sys.argv
     if interactive:
-        # N, M, Q, D = 30, 5, 4, 30
-        # X = numpy.random.rand(N, Q)
-        # k = GPy.kern.linear(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
+        # N, M, input_dim, D = 30, 5, 4, 30
+        # X = numpy.random.rand(N, input_dim)
+        # k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         # K = k.K(X)
         # Y = numpy.random.multivariate_normal(numpy.zeros(N), K, D).T
         # Y -= Y.mean(axis=0)
-        # k = GPy.kern.linear(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-        # m = GPy.models.Bayesian_GPLVM(Y, Q, kernel=k, M=M)
+        # k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        # m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, M=M)
         # m.ensure_default_constraints()
         # m.randomize()
         # # self.assertTrue(m.checkgrad())
         numpy.random.seed(0)
-        Q = 5
+        input_dim = 5
         N = 50
         M = 10
         D = 15
-        X = numpy.random.randn(N, Q)
+        X = numpy.random.randn(N, input_dim)
         X_var = .5 * numpy.ones_like(X) + .1 * numpy.clip(numpy.random.randn(*X.shape), 0, 1)
         Z = numpy.random.permutation(X)[:M]
-        Y = X.dot(numpy.random.randn(Q, D))
-        # kernel = GPy.kern.bias(Q)
+        Y = X.dot(numpy.random.randn(input_dim, D))
+        # kernel = GPy.kern.bias(input_dim)
         #
-        # kernels = [GPy.kern.linear(Q), GPy.kern.rbf(Q), GPy.kern.bias(Q),
-        #            GPy.kern.linear(Q) + GPy.kern.bias(Q),
-        #            GPy.kern.rbf(Q) + GPy.kern.bias(Q)]
+        # kernels = [GPy.kern.linear(input_dim), GPy.kern.rbf(input_dim), GPy.kern.bias(input_dim),
+        #            GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim),
+        #            GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim)]

         # for k in kernels:
         #     m = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,

@@ -140,18 +140,18 @@ if __name__ == "__main__":
         #     assert m.checkgrad(), "{} x psi1".format("+".join(map(lambda x: x.name, k.parts)))
         #
         # m0 = PsiStatModel('psi0', X=X, X_variance=X_var, Z=Z,
-        #                   M=M, kernel=GPy.kern.linear(Q))
+        #                   M=M, kernel=GPy.kern.linear(input_dim))
         # m1 = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
         #                   M=M, kernel=kernel)
         # m1 = PsiStatModel('psi1', X=X, X_variance=X_var, Z=Z,
         #                   M=M, kernel=kernel)
         # m2 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
-        #                   M=M, kernel=GPy.kern.rbf(Q))
+        #                   M=M, kernel=GPy.kern.rbf(input_dim))
         m3 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
-                          M=M, kernel=GPy.kern.linear(Q, ARD=True, variances=numpy.random.rand(Q)))
+                          M=M, kernel=GPy.kern.linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)))
         m3.ensure_default_constraints()
-        # + GPy.kern.bias(Q))
+        # + GPy.kern.bias(input_dim))
         # m4 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
-        #                   M=M, kernel=GPy.kern.rbf(Q) + GPy.kern.bias(Q))
+        #                   M=M, kernel=GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim))
     else:
         unittest.main()

@@ -7,38 +7,38 @@ import GPy

 class sparse_GPLVMTests(unittest.TestCase):
     def test_bias_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        k = GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.sparse_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.sparse_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     @unittest.skip('linear kernels do not have dKdiag_dX')
     def test_linear_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        k = GPy.kern.linear(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.sparse_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.sparse_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

     def test_rbf_kern(self):
-        N, M, Q, D = 10, 3, 2, 4
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+        N, M, input_dim, D = 10, 3, 2, 4
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
-        m = GPy.models.sparse_GPLVM(Y, Q, kernel = k, M=M)
+        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        m = GPy.models.sparse_GPLVM(Y, input_dim, kernel = k, M=M)
         m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -144,23 +144,23 @@ class GradientTests(unittest.TestCase):

     def test_GPLVM_rbf_bias_white_kern_2D(self):
         """ Testing GPLVM with rbf + bias and white kernel """
-        N, Q, D = 50, 1, 2
-        X = np.random.rand(N, Q)
-        k = GPy.kern.rbf(Q, 0.5, 0.9*np.ones((1,))) + GPy.kern.bias(Q, 0.1) + GPy.kern.white(Q, 0.05)
+        N, input_dim, D = 50, 1, 2
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.rbf(input_dim, 0.5, 0.9*np.ones((1,))) + GPy.kern.bias(input_dim, 0.1) + GPy.kern.white(input_dim, 0.05)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        m = GPy.models.GPLVM(Y, Q, kernel = k)
+        m = GPy.models.GPLVM(Y, input_dim, kernel = k)
         m.ensure_default_constraints()
         self.assertTrue(m.checkgrad())

     def test_GPLVM_rbf_linear_white_kern_2D(self):
         """ Testing GPLVM with rbf + bias and white kernel """
-        N, Q, D = 50, 1, 2
-        X = np.random.rand(N, Q)
-        k = GPy.kern.linear(Q) + GPy.kern.bias(Q, 0.1) + GPy.kern.white(Q, 0.05)
+        N, input_dim, D = 50, 1, 2
+        X = np.random.rand(N, input_dim)
+        k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim, 0.1) + GPy.kern.white(input_dim, 0.05)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,D).T
-        m = GPy.models.GPLVM(Y, Q, init = 'PCA', kernel = k)
+        m = GPy.models.GPLVM(Y, input_dim, init = 'PCA', kernel = k)
         m.ensure_default_constraints()
         self.assertTrue(m.checkgrad())

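Every test in this commit bottoms out in `self.assertTrue(m.checkgrad())`, which compares the model's analytic gradient against a numerical one; since the commit only renames an argument, passing grad-checks are exactly the right regression signal. For reference, a minimal finite-difference sketch of the idea in plain numpy (an illustration of what such a check does, not GPy's implementation):

import numpy as np

def check_grad(f, grad, x, step=1e-6, tol=1e-4):
    """Compare an analytic gradient with central finite differences."""
    g_analytic = grad(x)
    g_numeric = np.empty_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = step
        g_numeric[i] = (f(x + e) - f(x - e)) / (2.0 * step)
    return np.allclose(g_analytic, g_numeric, rtol=tol, atol=tol)

# Toy check: f(x) = sum(x**2) has gradient 2x.
x0 = np.random.randn(5)
assert check_grad(lambda x: np.sum(x ** 2), lambda x: 2 * x, x0)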