mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-09 12:02:38 +02:00
I think the gradients bug in the sparse GP model is due to Kmm being unstable to invert. Reducing M in some of the examples really helps
This commit is contained in:
parent
d71ad99db9
commit
1c60e50fed
3 changed files with 6 additions and 4 deletions
|
|
@ -90,9 +90,9 @@ class sparse_GP_regression(GP_regression):
|
|||
|
||||
# Computes dL_dKmm TODO: nicer precomputations
|
||||
|
||||
self.dL_dKmm = -0.5 * self.beta * self.D * mdot(self.Lmi.T, self.A, self.Lmi) # dB
|
||||
tmp = self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi)
|
||||
self.dL_dKmm = -self.beta * self.D * 0.5 * mdot(self.Lmi.T, self.A, self.Lmi) # dB
|
||||
self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - tmp - tmp.T + self.Kmmi) # dC
|
||||
self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - tmp - tmp.T + self.Kmmi) # dC #TODO: is tmp PD? save some computations here
|
||||
tmp = (mdot(self.LBL_inv, self.psi1YYpsi1, self.Kmmi)
|
||||
- self.beta*mdot(self.G, self.psi2, self.Kmmi))
|
||||
self.dL_dKmm += -0.5*self.beta2*(tmp + tmp.T - self.G) # dE
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue