mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-09 12:02:38 +02:00)
changed the behaviour of pdinv: now returns L and Li as well as the inverse
This commit is contained in:
parent 574f9f4e0a
commit c72d1b1a5b

5 changed files with 21 additions and 25 deletions
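In short: pdinv(A) used to return (inv, hld), where hld was half the log-determinant of A; it now returns (Ai, L, Li, logdet), where logdet is the full log-determinant. A minimal numpy-only sketch of the new contract (pdinv_sketch is a stand-in name for illustration; the real pdinv uses jitchol for numerical robustness):

    import numpy as np

    def pdinv_sketch(A):
        """Mimic the new pdinv contract: return Ai, L, Li, logdet."""
        L = np.linalg.cholesky(A)               # A = L L.T, L lower triangular
        Li = np.linalg.inv(L)                   # inverse of the Cholesky factor
        Ai = np.dot(Li.T, Li)                   # the inverse of A
        logdet = 2.*np.sum(np.log(np.diag(L)))  # log|A|, no longer halved
        return Ai, L, Li, logdet

    A = np.array([[4., 1.], [1., 3.]])
    Ai, L, Li, logdet = pdinv_sketch(A)
    assert np.allclose(np.dot(A, Ai), np.eye(2))
    assert np.isclose(logdet, np.linalg.slogdet(A)[1])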
@@ -19,7 +19,7 @@ class finite_dimensional(kernpart):
         self.D = D
         self.F = F
         self.G = G
-        self.G_1 ,tmp = pdinv(G)
+        self.G_1 ,L,Li,logdet = pdinv(G)
         self.n = F.shape[0]
         if weights is not None:
             assert weights.shape==(self.n,)
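Note that this call site, like the old one, keeps only the inverse: the old code bound the half-log-determinant to a throwaway tmp, and the new code binds the extra factors to locals L, Li and logdet that the constructor does not appear to use further.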
@@ -71,7 +71,7 @@ class GP_regression(model):
     def set_param(self,p):
         self.kern.expand_param(p)
         self.K = self.kern.K(self.X,slices1=self.Xslices)
-        self.Ki,self.hld = pdinv(self.K)
+        self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)

     def get_param(self):
         return self.kern.extract_param()
@@ -90,7 +90,7 @@ class GP_regression(model):
         return -0.5*np.sum(np.multiply(self.Ki, self.Youter))

     def log_likelihood(self):
-        complexity_term = -0.5*self.N*self.D*np.log(2.*np.pi) - self.D*self.hld
+        complexity_term = -0.5*self.N*self.D*np.log(2.*np.pi) - 0.5*self.D*self.K_logdet
         return complexity_term + self._model_fit_term()

     def dL_dK(self):
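The compensating factor of 0.5 matters here: the old hld was np.sum(np.log(np.diag(L))), i.e. half of log|K|, while the new K_logdet is the full log-determinant, so -self.D*self.hld and -0.5*self.D*self.K_logdet are the same quantity. A quick self-contained check:

    import numpy as np

    K = np.array([[2., 0.5], [0.5, 1.]])
    L = np.linalg.cholesky(K)
    hld = np.sum(np.log(np.diag(L)))         # old convention: half log-det
    logdet = 2.*np.sum(np.log(np.diag(L)))   # new convention: full log-det
    assert np.isclose(hld, 0.5*logdet)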
@@ -62,7 +62,7 @@ class generalized_FITC(model):
     def posterior_param(self):
         self.Knn_diag = self.kernel.Kdiag(self.X)
         self.Kmm = self.kernel.K(self.Z)
-        self.Kmmi, self.Kmm_hld = pdinv(self.Kmm)
+        self.Kmmi, self.Lmm, self.Lmmi, self.Kmm_logdet = pdinv(self.Kmm)
         self.Knm = self.kernel.K(self.X,self.Z)
         self.KmmiKmn = np.dot(self.Kmmi,self.Knm.T)
         self.Qnn = np.dot(self.Knm,self.KmmiKmn)
@@ -72,10 +72,10 @@ class generalized_FITC(model):
         self.Taut = self.ep_approx.tau_tilde/(1.+ self.ep_approx.tau_tilde*self.Diag0)
         self.KmnTaut = self.Knm.T*self.Taut[None,:]
         self.KmnTautKnm = np.dot(self.KmnTaut, self.Knm)
-        self.Woodbury_inv, self.Woodbury_hld = pdinv(self.Kmm + self.KmnTautKnm)
+        self.Woodbury_inv, self.Wood_L, self.Wood_Li, self.Woodbury_logdet = pdinv(self.Kmm + self.KmnTautKnm)
         self.Qnn_diag = self.Knn_diag - np.diag(self.Qnn) + 1./self.ep_approx.tau_tilde
         self.Qi = -np.dot(self.KmnTaut.T, np.dot(self.Woodbury_inv,self.KmnTaut)) + np.diag(self.Taut)
-        self.hld = 0.5*np.sum(np.log(self.Diag0 + 1./self.ep_approx.tau_tilde)) - self.Kmm_hld + self.Woodbury_hld
+        self.hld = 0.5*np.sum(np.log(self.Diag0 + 1./self.ep_approx.tau_tilde)) - 0.5*self.Kmm_logdet + 0.5*self.Woodbury_logdet

         self.Diag = self.Diag0/(1.+ self.Diag0 * self.ep_approx.tau_tilde)
         self.P = (self.Diag / self.Diag0)[:,None] * self.Knm
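The hld expression above is the matrix determinant lemma in half-log form: with Sigma = diag(Diag0 + 1./tau_tilde), Taut is exactly 1/diag(Sigma), and log|Sigma + Knm Kmm^-1 Knm^T| = sum(log(diag(Sigma))) - log|Kmm| + log|Kmm + Knm^T Sigma^-1 Knm|. Each term therefore picks up a factor of 0.5 now that pdinv returns full log-determinants. A numerical sanity check of the identity (shapes and values chosen only for illustration):

    import numpy as np

    rng = np.random.RandomState(0)
    Knm = rng.randn(5, 3)
    M = rng.randn(3, 3)
    Kmm = np.dot(M, M.T) + 3.*np.eye(3)   # a positive definite Kmm
    sigma = rng.rand(5) + 0.5             # diag(Diag0 + 1./tau_tilde) > 0

    Kmmi = np.linalg.inv(Kmm)
    lhs = np.linalg.slogdet(np.diag(sigma) + np.dot(Knm, np.dot(Kmmi, Knm.T)))[1]
    rhs = (np.sum(np.log(sigma))
           - np.linalg.slogdet(Kmm)[1]
           + np.linalg.slogdet(Kmm + np.dot(Knm.T * (1./sigma)[None, :], Knm))[1])
    assert np.isclose(lhs, rhs)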
@@ -87,14 +87,10 @@ class sparse_GP_regression(GP_regression):
         self.V = self.beta*self.Y
         self.psi1V = np.dot(self.psi1, self.V)
         self.psi1VVpsi1 = np.dot(self.psi1V, self.psi1V.T)
-        self.Lm = jitchol(self.Kmm)
-        self.Lmi = chol_inv(self.Lm)
-        self.Kmmi = np.dot(self.Lmi.T, self.Lmi)
+        self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
         self.A = mdot(self.Lmi, self.psi2, self.Lmi.T)
         self.B = np.eye(self.M) + self.beta * self.A
-        self.LB = jitchol(self.B)
-        self.LBi = chol_inv(self.LB)
-        self.Bi = np.dot(self.LBi.T, self.LBi)
+        self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B)
         self.LLambdai = np.dot(self.LBi, self.Lmi)
         self.trace_K = self.psi0 - np.trace(self.A)
         self.LBL_inv = mdot(self.Lmi.T, self.Bi, self.Lmi)
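Each removed triplet here (jitchol, then chol_inv, then np.dot(Li.T, Li)) is exactly the body of the new pdinv, so the rewrite is a drop-in refactor. The only genuinely new information is the cached Kmm_logdet and B_logdet; the latter is what lets the C term in the next hunk replace np.sum(np.log(np.diag(self.LB))) with 0.5*self.B_logdet, the two being equal because log|B| is twice the sum of the logs of the Cholesky diagonal.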
@@ -124,7 +120,7 @@ class sparse_GP_regression(GP_regression):
         """
         A = -0.5*self.N*self.D*(np.log(2.*np.pi) - np.log(self.beta))
         B = -0.5*self.beta*self.D*self.trace_K
-        C = -self.D * np.sum(np.log(np.diag(self.LB)))
+        C = -0.5*self.D * self.B_logdet
         D = -0.5*self.beta*self.trYYT
         E = +0.5*np.sum(self.psi1VVpsi1 * self.LBL_inv)
         return A+B+C+D+E
@@ -78,23 +78,23 @@ def jitchol(A,maxtries=5):

 def pdinv(A):
     """
-    Arguments
-    ---------
     :param A: A DxD pd numpy array

-    Returns
-    -------
-    inv : the inverse of A
-    hld: 0.5* the log of the determinant of A
+    :rval Ai: the inverse of A
+    :rtype Ai: np.ndarray
+    :rval L: the Cholesky decomposition of A
+    :rtype L: np.ndarray
+    :rval Li: the Cholesky decomposition of Ai
+    :rtype Li: np.ndarray
+    :rval logdet: the log of the determinant of A
+    :rtype logdet: float64
     """
     L = jitchol(A)
-    hld = np.sum(np.log(np.diag(L)))
+    logdet = 2.*np.sum(np.log(np.diag(L)))
+    Li = chol_inv(L)
+    Ai = np.dot(Li.T,Li) # TODO: get the flapack routine for multiplying triangular matrices

-    inv = sp.lib.lapack.flapack.dpotri(L)[0]
-    # inv = linalg.flapack.dpotri(L,lower = 1)[0]
-    inv = np.tril(inv)+np.tril(inv,-1).T
-    return inv, hld
+    return Ai, L, Li, logdet


 def chol_inv(L):
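For context on the lines removed from the old implementation: LAPACK's dpotri computes the inverse of a positive definite matrix from its Cholesky factor, but fills in only one triangle of the result, which is why the old code needed the final np.tril symmetrization. A small illustration against scipy's current lapack wrapper (the removed sp.lib.lapack.flapack path is an older scipy layout):

    import numpy as np
    from scipy.linalg import cholesky
    from scipy.linalg.lapack import dpotri

    A = np.array([[4., 1.], [1., 3.]])
    L = cholesky(A, lower=True)               # lower Cholesky factor of A
    inv, info = dpotri(L, lower=1)            # only the lower triangle is filled
    inv = np.tril(inv) + np.tril(inv, -1).T   # the symmetrization the old code did
    assert info == 0 and np.allclose(inv, np.linalg.inv(A))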