I think the gradients bug in the sparse GP model is due to Kmm being unstable to invert. Reducing M in some of the examples really helps.

This commit is contained in:
James Hensman 2012-12-02 12:32:20 +00:00
parent d71ad99db9
commit 1c60e50fed
3 changed files with 6 additions and 4 deletions

View file

@ -9,6 +9,7 @@ np.random.seed(1)
print "sparse GPLVM with RBF kernel"
N = 100
M = 4
Q = 1
D = 2
#generate GPLVM-like data
@ -17,7 +18,7 @@ k = GPy.kern.rbf(Q, 1.0, 2.0) + GPy.kern.white(Q, 0.00001)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N),K,D).T
m = GPy.models.sparse_GPLVM(Y, Q, M = 10)
m = GPy.models.sparse_GPLVM(Y, Q, M=M)
m.constrain_positive('(rbf|bias|noise)')
m.constrain_bounded('white', 1e-3, 0.1)
# m.plot()

View file

@ -12,6 +12,7 @@ import GPy
np.random.seed(2)
pb.ion()
N = 500
M = 5
######################################
## 1 dimensional example
@ -26,7 +27,7 @@ noise = GPy.kern.white(1)
kernel = rbf + noise
# create simple GP model
m1 = GPy.models.sparse_GP_regression(X,Y,kernel, M = 10)
m1 = GPy.models.sparse_GP_regression(X, Y, kernel, M=M)
# contrain all parameters to be positive
m1.constrain_positive('(variance|lengthscale|precision)')

View file

@ -90,9 +90,9 @@ class sparse_GP_regression(GP_regression):
# Computes dL_dKmm TODO: nicer precomputations
self.dL_dKmm = -0.5 * self.beta * self.D * mdot(self.Lmi.T, self.A, self.Lmi) # dB
tmp = self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi)
self.dL_dKmm = -self.beta * self.D * 0.5 * mdot(self.Lmi.T, self.A, self.Lmi) # dB
self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - tmp - tmp.T + self.Kmmi) # dC
self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - tmp - tmp.T + self.Kmmi) # dC #TODO: is tmp PD? save some computations here
tmp = (mdot(self.LBL_inv, self.psi1YYpsi1, self.Kmmi)
- self.beta*mdot(self.G, self.psi2, self.Kmmi))
self.dL_dKmm += -0.5*self.beta2*(tmp + tmp.T - self.G) # dE