Mirror of https://github.com/SheffieldML/GPy.git
commit 4eac0bd6db (parent da4686dd3c)

    started sorting out some tests

7 changed files with 42 additions and 43 deletions

@@ -3,7 +3,7 @@ Created on 24 Apr 2013

 @author: maxz
 '''
-from GPy.inference.gradient_descent_update_rules import FletcherReeves, \
+from gradient_descent_update_rules import FletcherReeves, \
     PolakRibiere
 from Queue import Empty
 from multiprocessing import Value

@@ -67,13 +67,13 @@ class Add(Kern):
         return sum([p.Kdiag(X[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)])


-    def psi0(self, Z, mu, S):
+    def psi0(self, Z, variational_posterior):
         return np.sum([p.psi0(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)],0)

-    def psi1(self, Z, mu, S):
+    def psi1(self, Z, variational_posterior):
         return np.sum([p.psi1(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)

-    def psi2(self, Z, mu, S):
+    def psi2(self, Z, variational_posterior):
         psi2 = np.sum([p.psi2(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)

         # compute the "cross" terms

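Note: the pattern in this file is that Add evaluates each child kernel on its own column slice of the input and sums the results. A toy, self-contained illustration of that slice-and-sum idiom (the stand-in functions and slices are illustrative, not GPy code):

import numpy as np

# stand-ins for the child kernels' Kdiag; each sees only its own columns
parts = [np.sin, np.cos]
input_slices = [slice(0, 1), slice(1, 2)]
X = np.random.rand(5, 2)

kdiag = sum(p(X[:, i_s]).sum(axis=1) for p, i_s in zip(parts, input_slices))
print(kdiag.shape)  # (5,)
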
@@ -101,7 +101,7 @@ class Add(Kern):
             raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return psi2

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, variational_posterior, Z):
         from white import White
         from rbf import RBF
         #from rbf_inv import RBFInv

@@ -18,26 +18,26 @@ class Static(Kern):
         ret[:] = self.variance
         return ret

-    def gradients_X(self, dL_dK, X, X2, target):
+    def gradients_X(self, dL_dK, X, X2=None):
         return np.zeros(X.shape)

-    def gradients_X_diag(self, dL_dKdiag, X, target):
+    def gradients_X_diag(self, dL_dKdiag, X):
         return np.zeros(X.shape)

-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         return np.zeros(Z.shape)

-    def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        return np.zeros(mu.shape), np.zeros(S.shape)
+    def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape)

-    def psi0(self, Z, mu, S):
-        return self.Kdiag(mu)
+    def psi0(self, Z, variational_posterior):
+        return self.Kdiag(variational_posterior.mean)

-    def psi1(self, Z, mu, S, target):
-        return self.K(mu, Z)
+    def psi1(self, Z, variational_posterior):
+        return self.K(variational_posterior.mean, Z)

-    def psi2(Z, mu, S):
-        K = self.K(mu, Z)
+    def psi2(self, Z, variational_posterior):
+        K = self.K(variational_posterior.mean, Z)
         return K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inherriting classes


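Note: the hunk above is the core of this commit: the psi statistics and their gradients now take a single variational_posterior argument in place of the separate mean and variance arrays (mu, S). Judging from the new lines, any object exposing .mean and .shape satisfies the interface. A minimal sketch of such a stand-in (the class name and the .variance attribute are illustrative assumptions, not part of the commit):

import numpy as np

class VariationalPosteriorStub:
    """Hypothetical duck-typed stand-in: .mean plays the role of the
    old mu, .variance the role of the old S."""
    def __init__(self, mean, variance):
        self.mean = mean
        self.variance = variance
        self.shape = mean.shape

# old call style:  k.psi1(Z, mu, S)
# new call style:  k.psi1(Z, VariationalPosteriorStub(mu, S))
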
@@ -51,8 +51,8 @@ class White(Static):
         else:
             return np.zeros((X.shape[0], X2.shape[0]))

-    def psi2(self, Z, mu, S, target):
-        return np.zeros((mu.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+    def psi2(self, Z, variational_posterior):
+        return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)

     def update_gradients_full(self, dL_dK, X):
         self.variance.gradient = np.trace(dL_dK)

@@ -60,7 +60,7 @@ class White(Static):
     def update_gradients_diag(self, dL_dKdiag, X):
         self.variance.gradient = dL_dKdiag.sum()

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         self.variance.gradient = np.trace(dL_dKmm) + dL_dpsi0.sum()


@@ -80,11 +80,11 @@ class Bias(Static):
     def update_gradients_diag(self, dL_dKdiag, X):
         self.variance.gradient = dL_dK.sum()

-    def psi2(self, Z, mu, S, target):
+    def psi2(self, Z, variational_posterior):
         ret = np.empty((mu.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
         ret[:] = self.variance**2
         return ret

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         self.variance.gradient = dL_dKmm.sum() + dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()

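Note: a quick sanity check on the Bias gradient above: psi2 is constant at variance**2, so the chain rule contributes a factor of 2*variance on the dL_dpsi2 term, which is exactly the 2.*self.variance factor in the new line. A standalone numerical confirmation (illustrative only):

import numpy as np

# central finite difference of f(v) = v**2 matches the analytic 2*v
variance, eps = 0.7, 1e-6
numeric = ((variance + eps)**2 - (variance - eps)**2) / (2 * eps)
assert np.isclose(numeric, 2 * variance)
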
@@ -10,11 +10,11 @@ class BGPLVMTests(unittest.TestCase):
     def test_bias_kern(self):
         N, num_inducing, input_dim, D = 10, 3, 2, 4
         X = np.random.rand(N, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -22,11 +22,11 @@ class BGPLVMTests(unittest.TestCase):
     def test_linear_kern(self):
         N, num_inducing, input_dim, D = 10, 3, 2, 4
         X = np.random.rand(N, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -34,11 +34,11 @@ class BGPLVMTests(unittest.TestCase):
     def test_rbf_kern(self):
         N, num_inducing, input_dim, D = 10, 3, 2, 4
         X = np.random.rand(N, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -46,11 +46,11 @@ class BGPLVMTests(unittest.TestCase):
     def test_rbf_bias_kern(self):
         N, num_inducing, input_dim, D = 10, 3, 2, 4
         X = np.random.rand(N, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -58,11 +58,11 @@ class BGPLVMTests(unittest.TestCase):
     def test_rbf_line_kern(self):
         N, num_inducing, input_dim, D = 10, 3, 2, 4
         X = np.random.rand(N, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -70,11 +70,11 @@ class BGPLVMTests(unittest.TestCase):
     def test_linear_bias_kern(self):
         N, num_inducing, input_dim, D = 30, 5, 4, 30
         X = np.random.rand(N, input_dim)
-        k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         Y -= Y.mean(axis=0)
-        k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -9,10 +9,10 @@ class GPLVMTests(unittest.TestCase):
     def test_bias_kern(self):
         num_data, num_inducing, input_dim, output_dim = 10, 3, 2, 4
         X = np.random.rand(num_data, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(num_data),K,output_dim).T
-        k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -20,10 +20,10 @@ class GPLVMTests(unittest.TestCase):
    def test_linear_kern(self):
         num_data, num_inducing, input_dim, output_dim = 10, 3, 2, 4
         X = np.random.rand(num_data, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(num_data),K,output_dim).T
-        k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -31,10 +31,10 @@ class GPLVMTests(unittest.TestCase):
     def test_rbf_kern(self):
         num_data, num_inducing, input_dim, output_dim = 10, 3, 2, 4
         X = np.random.rand(num_data, input_dim)
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(num_data),K,output_dim).T
-        k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -5,10 +5,10 @@ Created on 26 Apr 2013
 '''
 import unittest
 import numpy
-from GPy.inference.conjugate_gradient_descent import CGD, RUNNING
+from GPy.inference.optimization.conjugate_gradient_descent import CGD, RUNNING
 import pylab
 from scipy.optimize.optimize import rosen, rosen_der
-from GPy.inference.gradient_descent_update_rules import PolakRibiere
+from GPy.inference.optimization.gradient_descent_update_rules import PolakRibiere


 class Test(unittest.TestCase):

@@ -30,7 +30,7 @@ class Test(unittest.TestCase):
                 assert numpy.allclose(res[0], 0, atol=1e-5)
                 break
             except AssertionError:
-                import ipdb;ipdb.set_trace()
+                import pdb;pdb.set_trace()
                 # RESTART
                 pass
         else:

@@ -108,4 +108,3 @@ if __name__ == "__main__":
     res[0] = opt.opt(f, df, x0.copy(), callback, messages=True, maxiter=1000,
                      report_every=7, gtol=1e-12, update_rule=PolakRibiere)
-
     pass
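Note: the final hunk shows the optimizer's entry point in the test's __main__ block. A minimal sketch of driving it on the Rosenbrock function under the import paths this commit introduces; the default CGD() construction and the no-op callback are assumptions, and only the opt.opt(...) keyword arguments are taken from the hunk above:

import numpy as np
from scipy.optimize import rosen, rosen_der
from GPy.inference.optimization.conjugate_gradient_descent import CGD
from GPy.inference.optimization.gradient_descent_update_rules import PolakRibiere

opt = CGD()                    # assumption: default construction
x0 = np.random.randn(2)
res = opt.opt(rosen, rosen_der, x0.copy(), lambda *args: None,
              messages=True, maxiter=1000,
              report_every=7, gtol=1e-12, update_rule=PolakRibiere)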