Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-09 03:52:39 +02:00)
general tidying in sparse_GP_regression

Have also made a small amount of headway in enabling heteroscedastic noise.
This commit is contained in:
parent 365d069ae8
commit 1d8104bcc2

1 changed file with 32 additions and 26 deletions
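The common thread in the diff below: the noise precision beta is folded into the targets once, as V = beta*Y, so the beta and beta2 (= beta**2, from context) factors previously sprinkled through the likelihood and gradient expressions are absorbed into psi1V and psi1VVpsi1. A minimal numpy sketch of that equivalence, with toy shapes standing in for the model's real psi-statistics (psi1 is taken as (M, N), as implied by np.dot(self.psi1, self.Y) in the diff):

    import numpy as np

    # Toy stand-ins for the model quantities; shapes only, not real psi-statistics.
    N, M, D = 20, 5, 2
    rng = np.random.RandomState(0)
    psi1, Y, beta = rng.randn(M, N), rng.randn(N, D), 0.7

    psi1Y = np.dot(psi1, Y)   # old precomputation
    V = beta * Y              # new: fold beta into the targets once
    psi1V = np.dot(psi1, V)

    # psi1V and psi1VVpsi1 carry the beta powers the old code multiplied in later.
    assert np.allclose(psi1V, beta * psi1Y)
    assert np.allclose(np.dot(psi1V, psi1V.T),
                       beta**2 * np.dot(psi1Y, psi1Y.T))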
@@ -10,6 +10,10 @@ from .. import kern
 from ..inference.likelihoods import likelihood
 from GP_regression import GP_regression

+#Still TODO:
+# make use of slices properly (kernel can now do this)
+# enable heteroscedatic noise (kernel will need to compute psi2 as a (NxMxM) array)
+
 class sparse_GP_regression(GP_regression):
     """
     Variational sparse GP model (Regression)
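The new heteroscedastic TODO is what motivates the V = beta*Y rewrite in the hunks below: once beta enters only through V, a per-datum precision is a one-line change on the Y side, and the remaining work is the psi2 statistic, which, as the comment says, would then have to be kept per-datum as an (N x M x M) array. A purely hypothetical sketch of that shape bookkeeping (nothing here is in this commit):

    import numpy as np

    # Hypothetical heteroscedastic variant: one noise precision per datum.
    N, M, D = 20, 5, 2
    rng = np.random.RandomState(1)
    Y = rng.randn(N, D)
    beta = rng.gamma(2.0, 1.0, size=N)   # assumed per-datum precisions, shape (N,)
    psi2_n = rng.randn(N, M, M)          # per-datum psi2: the (NxMxM) array

    V = beta[:, None] * Y                # generalises V = beta*Y
    # A scalar beta*self.psi2 would become a precision-weighted sum over data:
    beta_psi2 = (beta[:, None, None] * psi2_n).sum(axis=0)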
@@ -64,8 +68,9 @@ class sparse_GP_regression(GP_regression):

     def _computations(self):
         # TODO find routine to multiply triangular matrices
-        self.psi1Y = np.dot(self.psi1, self.Y)
-        self.psi1YYpsi1 = np.dot(self.psi1Y, self.psi1Y.T)
+        self.V = self.beta*self.Y
+        self.psi1V = np.dot(self.psi1, self.V)
+        self.psi1VVpsi1 = np.dot(self.psi1V, self.psi1V.T)
         self.Lm = jitchol(self.Kmm)
         self.Lmi = chol_inv(self.Lm)
         self.Kmmi = np.dot(self.Lmi.T, self.Lmi)
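The unchanged tail of the hunk is the standard Cholesky route to the inverse (jitchol adds diagonal jitter until the factorisation succeeds):

$$K_{mm} = L_m L_m^{\top} \quad\Longrightarrow\quad K_{mm}^{-1} = L_m^{-\top} L_m^{-1},$$

which is what self.Kmmi = np.dot(self.Lmi.T, self.Lmi) computes from self.Lmi = chol_inv(self.Lm).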
@@ -77,24 +82,19 @@ class sparse_GP_regression(GP_regression):
         self.LLambdai = np.dot(self.LBi, self.Lmi)
         self.trace_K = self.psi0 - np.trace(self.A)
         self.LBL_inv = mdot(self.Lmi.T, self.Bi, self.Lmi)
-        self.C = mdot(self.LLambdai, self.psi1Y)
-        self.G = mdot(self.LBL_inv, self.psi1YYpsi1, self.LBL_inv.T)
+        self.C = mdot(self.LLambdai, self.psi1V)
+        self.G = mdot(self.LBL_inv, self.psi1VVpsi1, self.LBL_inv.T)

-        # Computes dL_dpsi
+        # Compute dL_dpsi
         self.dL_dpsi0 = - 0.5 * self.D * self.beta * np.ones(self.N)
-        dC_dpsi1 = (self.LLambdai.T[:,:, None, None] * self.Y) # this is sane.
-        tmp = (dC_dpsi1*self.C[None,:,None,:]).sum(1).sum(-1)
-        self.dL_dpsi1 = self.beta2 * tmp
-        self.dL_dpsi2 = (- 0.5 * self.D * self.beta * (self.LBL_inv - self.Kmmi)
-                         - self.beta**3 * 0.5 * self.G)
+        dC_dpsi1 = (self.LLambdai.T[:,:, None, None] * self.V)
+        self.dL_dpsi1 = (dC_dpsi1*self.C[None,:,None,:]).sum(1).sum(-1)
+        self.dL_dpsi2 = - 0.5 * self.beta * (self.D*(self.LBL_inv - self.Kmmi) + self.G)

-        # Computes dL_dKmm TODO: nicer precomputations
+        # Compute dL_dKmm
         self.dL_dKmm = -0.5 * self.beta * self.D * mdot(self.Lmi.T, self.A, self.Lmi) # dB
         self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - 2.*self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi) + self.Kmmi) # dC
-        tmp = (mdot(self.LBL_inv, self.psi1YYpsi1, self.Kmmi)
-               - self.beta*mdot(self.G, self.psi2, self.Kmmi))
-        self.dL_dKmm += -0.5*self.beta2*(tmp + tmp.T - self.G) # dE
+        self.dL_dKmm += np.dot(np.dot(self.G,self.beta*self.psi2) - np.dot(self.LBL_inv, self.psi1VVpsi1), self.Kmmi) + 0.5*self.G # dE

     def get_param(self):
         return np.hstack([self.Z.flatten(),self.beta,self.kern.extract_param()])
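The rewritten psi-gradients are the old expressions with the beta powers absorbed into V. Writing P for self.LBL_inv and using G_new = beta^2 * G_old (which follows from psi1VVpsi1 = beta^2 * psi1YYpsi1), the two forms of dL_dpsi2 agree:

$$-\tfrac{1}{2}\beta\bigl(D\,(P - K_{mm}^{-1}) + G_{\mathrm{new}}\bigr) \;=\; -\tfrac{1}{2}D\beta\,(P - K_{mm}^{-1}) \;-\; \tfrac{1}{2}\beta^{3}\,G_{\mathrm{old}}.$$

Likewise for dL_dpsi1: the new dC_dpsi1 and C each carry one extra factor of beta, supplying the beta2 the old code multiplied in explicitly. The dE contribution to dL_dKmm is rewritten in terms of psi1VVpsi1 in an unsymmetrised form, where the old code symmetrised via tmp + tmp.T.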
@@ -103,29 +103,35 @@ class sparse_GP_regression(GP_regression):
         return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern.extract_param_names()

     def log_likelihood(self):
+        """
+        Compute the (lower bound on the) log marginal likelihood
+        """
         A = -0.5*self.N*self.D*(np.log(2.*np.pi) - np.log(self.beta))
         B = -0.5*self.beta*self.D*self.trace_K
         C = -self.D * np.sum(np.log(np.diag(self.LB)))
         D = -0.5*self.beta*self.trYYT
-        E = +0.5*self.beta2*np.sum(self.psi1YYpsi1 * self.LBL_inv)
+        E = +0.5*np.sum(self.psi1VVpsi1 * self.LBL_inv)

         return A+B+C+D+E

     def dL_dbeta(self):
-        """ compute the gradient of the log likelihood wrt beta.
-        TODO: suport heteroscedatic noise"""
+        """
+        Compute the gradient of the log likelihood wrt beta.
+
+        TODO: suport heteroscedatic noise
+        """
         dA_dbeta = 0.5 * self.N*self.D/self.beta
         dB_dbeta = - 0.5 * self.D * self.trace_K
         dC_dbeta = - 0.5 * self.D * np.sum(self.Bi*self.A)
         dD_dbeta = - 0.5 * self.trYYT
-        tmp = mdot(self.LBi.T, self.LLambdai, self.psi1Y)
-        dE_dbeta = (self.beta * np.sum(np.square(self.C)) - 0.5 * self.beta2
-                    * np.sum(self.A * np.dot(tmp, tmp.T)))
+        tmp = mdot(self.LBi.T, self.LLambdai, self.psi1V)
+        dE_dbeta = np.sum(np.square(self.C))/self.beta - 0.5 * np.sum(self.A * np.dot(tmp, tmp.T))

         return np.squeeze(dA_dbeta + dB_dbeta + dC_dbeta + dD_dbeta + dE_dbeta)

     def dL_dtheta(self):
+        """
+        Compute and return the derivative of the log marginal likelihood wrt the parameters of the kernel
+        """
         #re-cast computations in psi2 back to psi1:
         dL_dpsi1 = self.dL_dpsi1 + 2.*np.dot(self.dL_dpsi2,self.psi1)
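For reference, inferring from unchanged code above these hunks that A = L_m^{-1} psi2 L_m^{-T}, B = I_M + beta*A with Cholesky factor LB, and P = LBL_inv = L_m^{-T} B^{-1} L_m^{-1} = (K_mm + beta*psi2)^{-1} (an assumption; those lines are not shown in this diff), the five terms of log_likelihood assemble the variational lower bound as

$$\mathcal{L} \;=\; \underbrace{-\tfrac{ND}{2}\log\tfrac{2\pi}{\beta}}_{A} \;\underbrace{-\tfrac{\beta D}{2}\bigl(\psi_0 - \operatorname{tr}(K_{mm}^{-1}\psi_2)\bigr)}_{B} \;\underbrace{-\tfrac{D}{2}\log\lvert B\rvert}_{C} \;\underbrace{-\tfrac{\beta}{2}\operatorname{tr}\bigl(YY^{\top}\bigr)}_{D} \;\underbrace{+\tfrac{1}{2}\operatorname{tr}\bigl(P\,\psi_1 V V^{\top}\psi_1^{\top}\bigr)}_{E}.$$

Since V = beta*Y, term E equals the old +0.5*self.beta2*np.sum(self.psi1YYpsi1 * self.LBL_inv). The same bookkeeping explains the new dE_dbeta: C and tmp now each carry one factor of beta, so np.sum(np.square(self.C))/self.beta reproduces the old beta*np.sum(np.square(self.C)), and the A-weighted term recovers the old 0.5*self.beta2 factor, removing another use of beta2.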
@@ -152,7 +158,7 @@ class sparse_GP_regression(GP_regression):
         Kx = self.kern.K(self.Z, Xnew)
         Kxx = self.kern.K(Xnew)

-        mu = self.beta * mdot(Kx.T, self.LBL_inv, self.psi1Y)
+        mu = mdot(Kx.T, self.LBL_inv, self.psi1V)
         var = Kxx - mdot(Kx.T, (self.Kmmi - self.LBL_inv), Kx) + np.eye(Xnew.shape[0])/self.beta # TODO: This beta doesn't belong here in the EP case.
         return mu,var
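In the same notation (P = self.LBL_inv, assumed equal to (K_mm + beta*psi2)^{-1} as above), the prediction hunk computes

$$\mu_* = K_x^{\top} P\,\psi_1 V = \beta\,K_x^{\top}\bigl(K_{mm}+\beta\,\psi_2\bigr)^{-1}\psi_1 Y, \qquad \operatorname{var}_* = K_{xx} - K_x^{\top}\bigl(K_{mm}^{-1} - P\bigr)K_x + \beta^{-1} I,$$

so dropping the leading self.beta from mu is exactly compensated by psi1V = beta*psi1Y; the predictive distribution is unchanged.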