mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-02 00:02:38 +02:00
Stupid error — needed to actually USE the gradients in the Student-t likelihood. It also looks like the s2 step of Rasmussen & Williams may carry an extra minus sign: should dW_df be -d2logpdf_df rather than just d2logpdf_df?
This commit is contained in:
parent
fc44478ed2
commit
d2a0e4a265
2 changed files with 11 additions and 20 deletions
|
|
@ -157,7 +157,7 @@ class LaplaceInference(object):
|
|||
log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + likelihood.logpdf(f_hat, Y, extra_data=Y_metadata) - np.sum(np.log(np.diag(L)))
|
||||
|
||||
#Compute vital matrices for derivatives
|
||||
dW_df = likelihood.d3logpdf_df3(f_hat, Y, extra_data=Y_metadata) # d3lik_d3fhat
|
||||
dW_df = -likelihood.d3logpdf_df3(f_hat, Y, extra_data=Y_metadata) # -d3lik_d3fhat
|
||||
woodbury_vector = likelihood.dlogpdf_df(f_hat, Y, extra_data=Y_metadata)
|
||||
dL_dfhat = -0.5*(np.diag(Ki_W_i)[:, None]*dW_df) #why isn't this -0.5? s2 in R&W p126 line 9.
|
||||
#BiK, _ = dpotrs(L, K, lower=1)
|
||||
|
|
@ -172,7 +172,7 @@ class LaplaceInference(object):
|
|||
explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i)
|
||||
|
||||
#Implicit
|
||||
implicit_part = -np.dot(woodbury_vector, dL_dfhat.T).dot(I_KW_i)
|
||||
implicit_part = np.dot(woodbury_vector, dL_dfhat.T).dot(I_KW_i)
|
||||
|
||||
dL_dK = explicit_part + implicit_part
|
||||
else:
|
||||
|
|
@ -189,28 +189,15 @@ class LaplaceInference(object):
|
|||
dL_dthetaL = np.zeros(num_params)
|
||||
for thetaL_i in range(num_params):
|
||||
#Explicit
|
||||
dL_dthetaL_exp = ( + np.sum(dlik_dthetaL[thetaL_i])
|
||||
dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i])
|
||||
# The + comes from the fact that dlik_hess_dthetaL == -dW_dthetaL
|
||||
+ 0.5*np.sum(np.diag(Ki_W_i).flatten()*dlik_hess_dthetaL[:, thetaL_i].flatten())
|
||||
#- 0.5*np.trace(np.diag(Ki_W_i)[:,None]*dlik_hess_dthetaL[:, thetaL_i])
|
||||
#+ 0.5*np.trace(np.dot(I_KW_i, K)*dlik_hess_dthetaL[:, thetaL_i])
|
||||
)
|
||||
|
||||
#Implicit
|
||||
dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[:, thetaL_i])
|
||||
#dfhat_dthetaL = mdot(Wi_K_i, dlik_grad_dthetaL[:, thetaL_i])
|
||||
#dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[:, thetaL_i])
|
||||
dL_dthetaL_imp = np.dot(dL_dfhat.T, dfhat_dthetaL)
|
||||
#import pylab as pb
|
||||
#pb.figure(1)
|
||||
#pb.matshow(Ki_W_i)
|
||||
#pb.title('I_KW_i approx')
|
||||
#pb.colorbar()
|
||||
#pb.figure(2)
|
||||
#pb.matshow(np.linalg.inv(np.dot(np.eye(Y.shape[0]) + np.sqrt(W).T*K*np.sqrt(W), K)))
|
||||
#pb.title('I_KW_i')
|
||||
#pb.colorbar()
|
||||
#print likelihood
|
||||
#pb.show()
|
||||
#import ipdb; ipdb.set_trace() # XXX BREAKPOINT
|
||||
dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp
|
||||
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -38,8 +38,12 @@ class StudentT(Likelihood):
|
|||
self.variance = (self.v / float(self.v - 2)) * self.sigma2
|
||||
|
||||
def update_gradients(self, partial):
    """
    Store the gradients of the log marginal likelihood w.r.t. the
    Student-t likelihood parameters on the parameter objects.

    Be careful: the entries of `partial` must arrive in the same order
    in which the parameters were added — sigma2 first, then v.

    :param partial: indexable sequence of gradient values;
        partial[0] is the gradient w.r.t. sigma2,
        partial[1] is the gradient w.r.t. v (degrees of freedom).
    """
    # Use the supplied gradients directly (previously these were
    # placeholder np.ones(1) values, which silently broke optimization).
    self.sigma2.gradient = partial[0]
    self.v.gradient = partial[1]
|
||||
|
||||
def pdf_link(self, link_f, y, extra_data=None):
|
||||
"""
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue