Mirror of https://github.com/SheffieldML/GPy.git

Merge branch 'devel' into mrd
Commit 1f19d40d89
24 changed files with 553 additions and 419 deletions
@@ -16,29 +16,6 @@ def backsub_both_sides(L,X):
    return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T

class FITC(sparse_GP):
    def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
        self.Z = Z
        self.M = self.Z.shape[0]
        self.true_precision = likelihood.precision

        #ERASEME
        #N = likelihood.Y.size
        #self.beta_star = np.random.rand(N,1)
        #self.Kmm_ = kernel.K(self.Z).copy()
        #self.Kmmi_,a,b,c = pdinv(self.Kmm_)
        #self.psi1_ = kernel.K(self.Z,X).copy()

        sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False)

    def _set_params(self, p):
        self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
        self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam])
        self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:])
        self._compute_kernel_matrices()
        self.scale_factor = 1.
        self._computations()

    def update_likelihood_approximation(self):
        """
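Aside: `_set_params` above unpacks one flat parameter vector laid out as [Z | kernel parameters | likelihood parameters]. A minimal sketch of that layout with made-up sizes (M, Q, Nparam are hypothetical values for illustration, not taken from the model):

import numpy as np

# toy sizes, purely for illustration
M, Q, Nparam = 3, 2, 2
p = np.arange(M*Q + Nparam + 1, dtype=float)  # hypothetical packed vector

Z = p[:M*Q].reshape(M, Q)                     # inducing inputs come first
kern_params = p[Z.size:Z.size + Nparam]       # then kernel hyperparameters
lik_params = p[Z.size + Nparam:]              # the remainder belongs to the likelihood
assert Z.size + kern_params.size + lik_params.size == p.size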
@@ -56,34 +33,15 @@ class FITC(sparse_GP):
        self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
        self._set_params(self._get_params()) # update the GP

    #@profile
    def _computations(self):

        #factor Kmm
        self.Lm = jitchol(self.Kmm)
        self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
        Lmipsi1 = np.dot(self.Lmi,self.psi1)
        self.Qnn = np.dot(Lmipsi1.T,Lmipsi1)
        self.Qnn = np.dot(Lmipsi1.T,Lmipsi1).copy()
        self.Diag0 = self.psi0 - np.diag(self.Qnn)

        #TODO eraseme
        """
        self.psi1 = self.psi1_
        self.Lm = jitchol(self.Kmm_)
        self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
        Lmipsi1 = np.dot(self.Lmi,self.psi1)
        #self.true_psi1 = self.kern.K(self.Z,self.X)
        #self.Qnn = mdot(self.true_psi1.T,self.Lmi.T,self.Lmi,self.true_psi1)
        self.Kmmi, a,b,c = pdinv(self.Kmm)
        self.Qnn = mdot(self.psi1.T,self.Kmmi,self.psi1)
        #self.Diag0 = self.psi0 #- np.diag(self.Qnn)
        self.Diag0 = - np.diag(self.Qnn)
        #Kmmi,Lm,Lmi,logdetK = pdinv(self.Kmm)
        #self.Lambda = self.Kmmi_ + mdot(self.Kmmi_,self.psi1_,self.beta_star*self.psi1_.T,self.Kmmi_) + np.eye(self.M)*100
        #self.Lambdai, LLm, LLmi, self.logdetLambda = pdinv(self.Lambda)
        """

        #TODO uncomment
        self.beta_star = self.likelihood.precision/(1. + self.likelihood.precision*self.Diag0[:,None]) # includes Diag0 in the precision
        self.V_star = self.beta_star * self.likelihood.Y
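Aside: the central quantity in this hunk is the per-point effective precision beta_star = beta / (1 + beta * Diag0), where Diag0 = diag(Knn - Qnn) is the FITC correction. A self-contained numpy sketch of the same computation; `kern_K` is a hypothetical covariance callable, not the GPy kernel API:

import numpy as np

def fitc_effective_precision(kern_K, X, Z, noise_precision, jitter=1e-8):
    """Sketch of the per-point FITC precision computed in _computations:
    beta_star_n = beta / (1 + beta * Diag0_n), Diag0 = diag(Knn - Qnn)."""
    Kmm = kern_K(Z, Z) + jitter * np.eye(len(Z))
    Kmn = kern_K(Z, X)
    Lm = np.linalg.cholesky(Kmm)
    Lmipsi1 = np.linalg.solve(Lm, Kmn)            # Lm^{-1} Kmn
    Qnn_diag = np.sum(Lmipsi1 ** 2, axis=0)       # diag(Knm Kmm^{-1} Kmn)
    Diag0 = np.diag(kern_K(X, X)) - Qnn_diag      # diag(Knn - Qnn), non-negative
    return noise_precision / (1. + noise_precision * Diag0)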
@@ -92,7 +50,7 @@ class FITC(sparse_GP):
            raise NotImplementedError
        else:
            if self.likelihood.is_heteroscedastic:
                assert self.likelihood.D == 1 # TODO: what if the likelihood is heteroscedastic and there are multiple independent outputs?
                assert self.likelihood.D == 1
                tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                self.A = tdot(tmp)
@@ -107,74 +65,122 @@ class FITC(sparse_GP):
        tmp, info1 = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
        self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)

        # dlogbeta_dtheta
        Kmmipsi1 = np.dot(self.Lmi.T,Lmipsi1)
        b_psi1_Ki = self.beta_star * Kmmipsi1.T
        Ki_pbp_Ki = np.dot(Kmmipsi1,b_psi1_Ki)
        dlogB_dpsi0 = -.5*self.kern.dKdiag_dtheta(self.beta_star,X=self.X)
        dlogB_dpsi1 = self.kern.dK_dtheta(b_psi1_Ki,self.X,self.Z)
        dlogB_dKmm = -.5*self.kern.dK_dtheta(Ki_pbp_Ki,X=self.Z)
        self.dlogbeta_dtheta = dlogB_dpsi0 + dlogB_dpsi1 + dlogB_dKmm

        # dyby_dtheta
        Kmmi = np.dot(self.Lmi.T,self.Lmi)
        LBiLmi = np.dot(self.LBi,self.Lmi)
        LBL_inv = np.dot(LBiLmi.T,LBiLmi)
        VVT = np.outer(self.V_star,self.V_star)
        VV_p_Ki = np.dot(VVT,Kmmipsi1.T)
        Ki_pVVp_Ki = np.dot(Kmmipsi1,VV_p_Ki)
        dyby_dpsi0 = .5 * self.kern.dKdiag_dtheta(self.V_star**2,self.X)

        dyby_dpsi1 = 0
        dyby_dKmm = 0
        dyby_dtheta = dyby_dpsi0
        for psi1_n,V_n,X_n in zip(self.psi1.T,self.V_star,self.X):
            dyby_dpsi1 = -V_n**2 * np.dot(psi1_n[None,:],Kmmi)
            dyby_dtheta += self.kern.dK_dtheta(dyby_dpsi1,X_n[:,None],self.Z)

        for psi1_n,V_n,X_n in zip(self.psi1.T,self.V_star,self.X):
            psin_K = np.dot(psi1_n[None,:],Kmmi)
            tmp = np.dot(psin_K.T,psin_K)
            dyby_dKmm = .5*V_n**2 * tmp
            dyby_dtheta += self.kern.dK_dtheta(dyby_dKmm,self.Z)
        self.dyby_dtheta = dyby_dtheta

        # dlogB_dtheta : C

        #C_B
        dC_B = -.5*Kmmi
        C_B = self.kern.dK_dtheta(dC_B,self.Z) #check

        #C_A
        LBiLmi = np.dot(self.LBi,self.Lmi)
        LBL_inv = np.dot(LBiLmi.T,LBiLmi)
        dC_AA = .5*LBL_inv
        C_AA = self.kern.dK_dtheta(dC_AA,self.Z) #check

        #C_AB
        psi1beta = self.psi1*self.beta_star.T
        dC_ABA = mdot(LBL_inv,psi1beta,Kmmipsi1.T)
        C_ABA = self.kern.dK_dtheta(dC_ABA,self.Z)
        dC_ABB = -np.dot(psi1beta.T,LBL_inv) #check
        C_ABB = self.kern.dK_dtheta(dC_ABB,self.X,self.Z) #check

        # C_ABC
        H = self.Kmm + mdot(self.psi1,self.beta_star*self.psi1.T)
        Hi, LH, LHi, logdetH = pdinv(H)
        betapsi1TLmiLBi = np.dot(psi1beta.T,LBiLmi.T)
        alpha = np.array([np.dot(a.T,a) for a in betapsi1TLmiLBi])[:,None]
        dC_ABCA = .5 *alpha
        C_ABCA = self.kern.dKdiag_dtheta(dC_ABCA,self.X) #check
        gamma_1 = mdot(VVT,self.psi1.T,Hi)
        pHip = mdot(self.psi1.T,Hi,self.psi1)
        gamma_2 = mdot(self.beta_star*pHip,self.V_star)
        gamma_3 = self.V_star * mdot(self.V_star.T,pHip*self.beta_star).T

        C_ABCB = 0
        for psi1_n,alpha_n,X_n in zip(self.psi1.T,alpha,self.X):
            dC_ABCB_n = - alpha_n * np.dot(psi1_n[None,:],Kmmi)
            C_ABCB += self.kern.dK_dtheta(dC_ABCB_n,X_n[:,None],self.Z) #check

        C_ABCC = 0
        for psi1_n,alpha_n,X_n in zip(self.psi1.T,alpha,self.X):
            psin_K = np.dot(psi1_n[None,:],Kmmi)
            tmp = np.dot(psin_K.T,psin_K)
            dC_ABCC = .5 * alpha_n * tmp
            C_ABCC += self.kern.dK_dtheta(dC_ABCC,self.Z) #check

        self.dlogB_dtheta = C_B + C_AA + C_ABA + C_ABB + C_ABCA + C_ABCB + C_ABCC

        dA_dpsi0_1 = -0.5 * self.beta_star
        dA_dpsi0 = .5 * self.V_star**2

        dC_dpsi0 = .5 *alpha
        dD_dpsi0 = 0.5*mdot(self.beta_star*pHip,self.V_star)**2
        dD_dpsi1 = gamma_1
        dD_dpsi1 += -mdot(psi1beta.T,Hi,self.psi1,gamma_1)
        dD_dpsi0 += -self.V_star * mdot(self.V_star.T,pHip*self.beta_star).T

        dA_dpsi1 = b_psi1_Ki
        dC_dpsi1 = -np.dot(psi1beta.T,LBL_inv)

        dA_dKmm = -0.5 * np.dot(Kmmipsi1,b_psi1_Ki)
        dC_dKmm = -.5*Kmmi
        dC_dKmm += .5*LBL_inv
        dC_dKmm += mdot(LBL_inv,psi1beta,Kmmipsi1.T)
        dD_dKmm = -.5 * mdot(Hi,self.psi1,gamma_1)

        dA_dpsi0_theta = self.kern.dKdiag_dtheta(dA_dpsi0,X=self.X)

        dA_dpsi1_theta = 0
        dA_dpsi1_X = 0
        dA_dKmm_theta = 0
        dA_dKmm_X = 0
        _dC_dpsi1_dtheta = 0
        _dC_dpsi1_dX = 0
        _dC_dKmm_dtheta = 0
        _dC_dKmm_dX = 0
        _dD_dpsi1_dtheta_1 = 0
        _dD_dpsi1_dX_1 = 0
        _dD_dKmm_dtheta_1 = 0
        _dD_dKmm_dX_1 = 0
        _dD_dpsi1_dtheta_2 = 0
        _dD_dpsi1_dX_2 = 0
        _dD_dKmm_dtheta_2 = 0
        _dD_dKmm_dX_2 = 0

        for psi1_n,V_n,X_n,alpha_n,gamma_n,gamma_k in zip(self.psi1.T,self.V_star,self.X,alpha,gamma_2,gamma_3):

            _dA_dpsi1 = -V_n**2 * np.dot(psi1_n[None,:],Kmmi)
            _dC_dpsi1 = - alpha_n * np.dot(psi1_n[None,:],Kmmi)
            _dD_dpsi1_1 = - gamma_n**2 * np.dot(psi1_n[None,:],Kmmi)
            _dD_dpsi1_2 = 2. * gamma_k * np.dot(psi1_n[None,:],Kmmi)

            psin_K = np.dot(psi1_n[None,:],Kmmi)
            _dA_dKmm = .5*V_n**2 * np.dot(psin_K.T,psin_K)
            _dC_dKmm = .5 * alpha_n * np.dot(psin_K.T,psin_K)
            _dD_dKmm_1 = .5*gamma_n**2 * np.dot(psin_K.T,psin_K)
            _dD_dKmm_2 = - gamma_n * np.dot(psin_K.T,psin_K)

            dA_dpsi1_theta += self.kern.dK_dtheta(_dA_dpsi1,X_n[None,:],self.Z)
            _dC_dpsi1_dtheta += self.kern.dK_dtheta(_dC_dpsi1,X_n[None,:],self.Z)
            _dD_dpsi1_dtheta_1 += self.kern.dK_dtheta(_dD_dpsi1_1,X_n[None,:],self.Z)
            _dD_dpsi1_dtheta_2 += self.kern.dK_dtheta(_dD_dpsi1_2,X_n[None,:],self.Z)

            dA_dKmm_theta += self.kern.dK_dtheta(_dA_dKmm,self.Z)
            _dC_dKmm_dtheta += self.kern.dK_dtheta(_dC_dKmm,self.Z)
            _dD_dKmm_dtheta_1 += self.kern.dK_dtheta(_dD_dKmm_1,self.Z)
            _dD_dKmm_dtheta_2 += self.kern.dK_dtheta(_dD_dKmm_2,self.Z)

            dA_dpsi1_X += self.kern.dK_dX(_dA_dpsi1.T,self.Z,X_n[None,:])
            _dC_dpsi1_dX += self.kern.dK_dX(_dC_dpsi1.T,self.Z,X_n[None,:])
            _dD_dpsi1_dX_1 += self.kern.dK_dX(_dD_dpsi1_1.T,self.Z,X_n[None,:])
            _dD_dpsi1_dX_2 += self.kern.dK_dX(_dD_dpsi1_2.T,self.Z,X_n[None,:])

            dA_dKmm_X += 2.*self.kern.dK_dX(_dA_dKmm,self.Z)
            _dC_dKmm_dX += 2.*self.kern.dK_dX(_dC_dKmm,self.Z)
            _dD_dKmm_dX_1 += 2.*self.kern.dK_dX(_dD_dKmm_1,self.Z)
            _dD_dKmm_dX_2 += 2.*self.kern.dK_dX(_dD_dKmm_2,self.Z)

        dA_dX_1 = self.kern.dK_dX(dA_dpsi1.T,self.Z,self.X) + 2. * self.kern.dK_dX(dA_dKmm,X=self.Z)
        dA_dtheta_1 = self.kern.dKdiag_dtheta(dA_dpsi0_1,X=self.X) + self.kern.dK_dtheta(dA_dpsi1,self.X,self.Z) + self.kern.dK_dtheta(dA_dKmm,X=self.Z)

        dA_dtheta_2 = dA_dpsi0_theta + dA_dpsi1_theta + dA_dKmm_theta
        dA_dX_2 = dA_dpsi1_X + dA_dKmm_X

        self.dA_dtheta = dA_dtheta_1 + dA_dtheta_2
        self.dA_dX = dA_dX_1 + dA_dX_2

        self.dlogB_dtheta = self.kern.dK_dtheta(dC_dKmm,self.Z) + self.kern.dK_dtheta(dC_dpsi1,self.X,self.Z) + self.kern.dKdiag_dtheta(dC_dpsi0,self.X) + _dC_dpsi1_dtheta + _dC_dKmm_dtheta
        self.dlogB_dX = 2.*self.kern.dK_dX(dC_dKmm,self.Z) + self.kern.dK_dX(dC_dpsi1.T,self.Z,self.X) + _dC_dpsi1_dX + _dC_dKmm_dX

        self.dD_dtheta = self.kern.dKdiag_dtheta(dD_dpsi0,self.X) + self.kern.dK_dtheta(dD_dKmm,self.Z) + self.kern.dK_dtheta(dD_dpsi1,self.X,self.Z) + _dD_dpsi1_dtheta_2 + _dD_dKmm_dtheta_2 + _dD_dpsi1_dtheta_1 + _dD_dKmm_dtheta_1
        self.dD_dX = 2.*self.kern.dK_dX(dD_dKmm,self.Z) + self.kern.dK_dX(dD_dpsi1.T,self.Z,self.X) + _dD_dpsi1_dX_2 + _dD_dKmm_dX_2 + _dD_dpsi1_dX_1 + _dD_dKmm_dX_1

        # the partial derivative vector for the likelihood
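Aside: gradients assembled this way (dK_dtheta/dKdiag_dtheta terms summed over data points) are easy to get wrong, and a central finite-difference check is the usual safeguard. A generic sketch; `model_objective` and `model_gradient` are hypothetical stand-ins for the model's log likelihood and its analytic gradient:

import numpy as np

def check_grad(f, df, theta, eps=1e-6):
    """Central finite differences against an analytic gradient.
    f: scalar objective, df: analytic gradient, theta: 1-D float array."""
    num = np.zeros_like(theta)
    for i in range(theta.size):
        t_plus, t_minus = theta.copy(), theta.copy()
        t_plus[i] += eps
        t_minus[i] -= eps
        num[i] = (f(t_plus) - f(t_minus)) / (2.*eps)
    return np.max(np.abs(num - df(theta)))

# usage sketch: the returned error should be tiny when the gradient is correct
# err = check_grad(model_objective, model_gradient, theta0)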
@@ -185,64 +191,117 @@ class FITC(sparse_GP):
            raise NotImplementedError, "heteroscedastic derivatives not implemented"
        else:
            # likelihood is not heteroscedastic
            self.partial_for_likelihood = 0 #FIXME
            #self.partial_for_likelihood = -0.5 * self.N * self.D * self.likelihood.precision + 0.5 * self.likelihood.trYYT * self.likelihood.precision ** 2
            #self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision)
            #self.partial_for_likelihood += self.likelihood.precision * (0.5 * np.sum(self.A * self.DBi_plus_BiPBi) - np.sum(np.square(self._LBi_Lmi_psi1V)))
            dbstar_dnoise = self.likelihood.precision * (self.beta_star**2 * self.Diag0[:,None] - self.beta_star)
            Lmi_psi1 = mdot(self.Lmi,self.psi1)
            LBiLmipsi1 = np.dot(self.LBi,Lmi_psi1)
            aux_0 = np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
            aux_1 = self.likelihood.Y.T * np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
            aux_2 = np.dot(LBiLmipsi1.T,self._LBi_Lmi_psi1V)

            dA_dnoise = 0.5 * self.D * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.D * np.sum(self.likelihood.Y**2 * dbstar_dnoise)
            dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T)

            dD_dnoise_1 = mdot(self.V_star*LBiLmipsi1.T,LBiLmipsi1*dbstar_dnoise.T*self.likelihood.Y.T)
            alpha = mdot(LBiLmipsi1,self.V_star)
            alpha_ = mdot(LBiLmipsi1.T,alpha)
            dD_dnoise_2 = -0.5 * self.D * np.sum(alpha_**2 * dbstar_dnoise)

            dD_dnoise_1 = mdot(self.V_star.T,self.psi1.T,self.Lmi.T,self.LBi.T,self.LBi,self.Lmi,self.psi1,dbstar_dnoise*self.likelihood.Y)
            dD_dnoise_2 = 0.5*mdot(self.V_star.T,self.psi1.T,Hi,self.psi1,dbstar_dnoise*self.psi1.T,Hi,self.psi1,self.V_star)
            dD_dnoise = dD_dnoise_1 + dD_dnoise_2

            self.partial_for_likelihood = dA_dnoise + dC_dnoise + dD_dnoise
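Aside: `dbstar_dnoise` above is the derivative of beta_star with respect to the noise variance. Since beta_star = beta/(1 + beta*Diag0) = 1/(noise_var + Diag0), the expression beta*(beta_star**2 * Diag0 - beta_star) collapses to -beta_star**2, which a quick scalar check confirms:

import numpy as np

noise_var, d = 0.3, 0.7                          # toy noise variance and Diag0 entry
beta = 1./noise_var
beta_star = beta/(1. + beta*d)
analytic = beta*(beta_star**2 * d - beta_star)   # the dbstar_dnoise expression

bs = lambda s2: (1./s2)/(1. + (1./s2)*d)         # beta_star as a function of noise_var
eps = 1e-7
numeric = (bs(noise_var + eps) - bs(noise_var - eps))/(2.*eps)
assert np.allclose(analytic, numeric) and np.allclose(analytic, -beta_star**2)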
    def log_likelihood(self):
        """ Compute the (lower bound on the) log marginal likelihood """
        A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
        C = -self.D * (np.sum(np.log(np.diag(self.LB))))
        """
        A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
        #B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
        C = -self.D * (np.sum(np.log(np.diag(self.LB))))
        D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
        return A + C + D # +B
        """
        return A+C

        return A + C + D

    def _log_likelihood_gradients(self):
        pass
        return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))
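Aside: for intuition, the bound assembled from A, C (and D) corresponds to a FITC-style Gaussian marginal with covariance Qnn + diag(Knn - Qnn) + noise. A naive O(N^3) reference sketch, useful only for testing on small problems; it assumes homoscedastic Gaussian noise and a 1-D target vector, and is not the GPy API:

import numpy as np

def fitc_log_marginal(Knn_diag, Knm, Kmm, y, noise_var, jitter=1e-8):
    """Dense reference: y ~ N(0, Qnn + diag(Knn - Qnn) + noise_var*I),
    with Qnn = Knm Kmm^{-1} Knm^T."""
    Lm = np.linalg.cholesky(Kmm + jitter * np.eye(Kmm.shape[0]))
    V = np.linalg.solve(Lm, Knm.T)                 # so that Qnn = V.T V
    Qnn = V.T @ V
    cov = Qnn + np.diag(Knn_diag - np.diag(Qnn)) + noise_var * np.eye(len(y))
    L = np.linalg.cholesky(cov)
    alpha = np.linalg.solve(L, y)
    return (-0.5 * len(y) * np.log(2 * np.pi)
            - np.sum(np.log(np.diag(L)))
            - 0.5 * np.sum(alpha ** 2))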
    def dL_dtheta(self):
        #dL_dtheta = self.dlogB_dtheta
        #dL_dtheta = self.dyby_dtheta
        #dL_dtheta = self.dlogbeta_dtheta + self.dyby_dtheta
        dL_dtheta = self.dlogB_dtheta
        dL_dtheta = self.dlogbeta_dtheta + self.dyby_dtheta + self.dlogB_dtheta
        """
        dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z)
        if self.has_uncertain_inputs:
            dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z, self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1.T, self.Z, self.X, self.X_variance)
            dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2, self.Z, self.X, self.X_variance)
            raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
        else:
            dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.Z, self.X)
            dL_dtheta += self.kern.dKdiag_dtheta(self.dL_dpsi0, self.X)
        """
        dL_dtheta = self.dA_dtheta + self.dlogB_dtheta + self.dD_dtheta
        return dL_dtheta

    def dL_dZ(self):
        dL_dZ = np.zeros(self.M)
        """
        dL_dZ = 2.*self.kern.dK_dX(self.dL_dKmm, self.Z) # factor of two because of vertical and horizontal 'stripes' in dKmm_dZ
        if self.has_uncertain_inputs:
            dL_dZ += self.kern.dpsi1_dZ(self.dL_dpsi1, self.Z, self.X, self.X_variance)
            dL_dZ += self.kern.dpsi2_dZ(self.dL_dpsi2, self.Z, self.X, self.X_variance)
            raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
        else:
            dL_dZ += self.kern.dK_dX(self.dL_dpsi1, self.Z, self.X)
        """
        dL_dZ = self.dA_dX + self.dlogB_dX + self.dD_dX
        return dL_dZ
    def _raw_predict(self, Xnew, which_parts, full_cov=False):
        """
        Make a prediction for the generalized FITC model

        Arguments
        ---------
        X : Input prediction data - Nx1 numpy array (floats)
        """
        if self.likelihood.is_heteroscedastic:
            Iplus_Dprod_i = 1./(1. + self.Diag0 * self.likelihood.precision.flatten())
            self.Diag = self.Diag0 * Iplus_Dprod_i
            self.P = Iplus_Dprod_i[:,None] * self.psi1.T
            self.RPT0 = np.dot(self.Lmi,self.psi1)
            self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
            self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1)
            self.RPT = np.dot(self.R,self.P.T)
            self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
            self.w = self.Diag * self.likelihood.v_tilde
            self.gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
            self.mu = self.w + np.dot(self.P,self.gamma)

            # q(u|f) = N(u| R0i*mu_u*f, R0i*C*R0i.T)
            # Ci = I + (RPT0)Di(RPT0).T
            # C = I - [RPT0] * (D+[RPT0].T*[RPT0])^-1 * [RPT0].T
            #   = I - [RPT0] * (D + self.Qnn)^-1 * [RPT0].T
            #   = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
            #   = I - V.T * V
            U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
            V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1)
            C = np.eye(self.M) - np.dot(V.T,V)
            mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
            #self.C = C
            #self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T
            #self.mu_u = mu_u
            #self.U = U

            # q(u|y) = N(u| R0i*mu_H, R0i*Sigma_H*R0i.T)
            mu_H = np.dot(mu_u,self.mu)
            self.mu_H = mu_H
            Sigma_H = C + np.dot(mu_u,np.dot(self.Sigma,mu_u.T))

            # q(f_star|y) = N(f_star|mu_star,sigma2_star)
            Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
            KR0T = np.dot(Kx.T,self.Lmi.T)
            mu_star = np.dot(KR0T,mu_H)
            if full_cov:
                Kxx = self.kern.K(Xnew,which_parts=which_parts)
                var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T))
            else:
                Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts)
                Kxx_ = self.kern.K(Xnew,which_parts=which_parts) # TODO: RA, is this line needed?
                var_ = Kxx_ + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T)) # TODO: RA, is this line needed?
                var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.M),KR0T.T),0))[:,None]
            return mu_star[:,None],var
        else:
            raise NotImplementedError, "homoscedastic fitc not implemented"
        """
        Kx = self.kern.K(self.Z, Xnew)
        mu = mdot(Kx.T, self.C/self.scale_factor, self.psi1V)
        if full_cov:
            Kxx = self.kern.K(Xnew)
            var = Kxx - mdot(Kx.T, (self.Kmmi - self.C/self.scale_factor**2), Kx) #NOTE this won't work for plotting
        else:
            Kxx = self.kern.Kdiag(Xnew)
            var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.C/self.scale_factor**2, Kx),0)
        return mu,var[:,None]
        """
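Aside: the Cholesky construction in `_raw_predict` relies on the Woodbury identity quoted in the comments, C = (I + P D^{-1} P^T)^{-1} = I - P (D + P^T P)^{-1} P^T. A small numeric check of that identity, where P stands in for RPT0 and d for Diag0 (toy shapes, not model values):

import numpy as np

rng = np.random.default_rng(0)
M, N = 4, 7
P = rng.standard_normal((M, N))              # plays the role of RPT0 = Lmi @ psi1
d = rng.uniform(0.5, 2.0, N)                 # plays the role of Diag0 (positive)

Ci = np.eye(M) + (P / d) @ P.T               # Ci = I + P D^{-1} P^T
C_direct = np.linalg.inv(Ci)

# Woodbury form used in the code, via a Cholesky factor U of (D + Qnn)
U = np.linalg.cholesky(np.diag(d) + P.T @ P)
V = np.linalg.solve(U, P.T)                  # so that C = I - V.T V
C_chol = np.eye(M) - V.T @ V
assert np.allclose(C_direct, C_chol)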
@@ -3,10 +3,11 @@

import numpy as np
from scipy import linalg
import pylab as pb
from .. import kern
from ..core import model
from ..util.linalg import pdinv, mdot
from ..util.linalg import pdinv, mdot, tdot
from ..util.plot import gpplot, x_frame1D, x_frame2D, Tango
from ..likelihoods import EP
@@ -58,13 +59,12 @@ class GP(model):
        """
        TODO: one day we might like to learn Z by gradient methods?
        """
        # FIXME: this doesn't live here.
        return np.zeros_like(self.Z)

    def _set_params(self, p):
        self.kern._set_params_transformed(p[:self.kern.Nparam_transformed()])
        # self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
        self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
        self.likelihood._set_params(p[self.kern.Nparam_transformed():])

        self.K = self.kern.K(self.X)
        self.K += self.likelihood.covariance_matrix
@@ -73,10 +73,14 @@ class GP(model):

        # the gradient of the likelihood wrt the covariance matrix
        if self.likelihood.YYT is None:
            alpha = np.dot(self.Ki, self.likelihood.Y)
            self.dL_dK = 0.5 * (np.dot(alpha, alpha.T) - self.D * self.Ki)
            #alpha = np.dot(self.Ki, self.likelihood.Y)
            alpha,_ = linalg.lapack.flapack.dpotrs(self.L, self.likelihood.Y,lower=1)
            self.dL_dK = 0.5 * (tdot(alpha) - self.D * self.Ki)
        else:
            tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
            #tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
            self.dL_dK = 0.5 * (tmp - self.D * self.Ki)

    def _get_params(self):
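Aside: this hunk swaps an explicit multiplication by the stored inverse `self.Ki` for a LAPACK `dpotrs` solve against the Cholesky factor, which is cheaper and numerically safer. `scipy.linalg.cho_solve` is the high-level counterpart of `dpotrs`; a sketch of the equivalence on toy data:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(1)
N, D = 6, 2
A = rng.standard_normal((N, N))
K = A @ A.T + N * np.eye(N)                  # SPD covariance
Y = rng.standard_normal((N, D))

L = np.linalg.cholesky(K)
alpha_inv = np.linalg.inv(K) @ Y             # explicit inverse (old code path)
alpha_chol = linalg.cho_solve((L, True), Y)  # Cholesky solve (what dpotrs does)
assert np.allclose(alpha_inv, alpha_chol)

# tdot(alpha) is GPy shorthand for alpha @ alpha.T
dL_dK = 0.5 * (alpha_chol @ alpha_chol.T - D * np.linalg.inv(K))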
@@ -100,7 +104,9 @@ class GP(model):
        Computes the model fit using YYT if it's available
        """
        if self.likelihood.YYT is None:
            return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
            tmp, _ = linalg.lapack.flapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
            return -0.5 * np.sum(np.square(tmp))
            #return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
        else:
            return -0.5 * np.sum(np.multiply(self.Ki, self.likelihood.YYT))
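Aside: the two branches compute the same model-fit term, since -0.5*||L^{-1}Y||^2 = -0.5 * Y^T K^{-1} Y = -0.5 * tr(K^{-1} Y Y^T) when K = L L^T. A quick numeric confirmation of all three forms:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(2)
N = 5
A = rng.standard_normal((N, N))
K = A @ A.T + N * np.eye(N)
y = rng.standard_normal((N, 1))

L = np.linalg.cholesky(K)
Ki = np.linalg.inv(K)
fit_tri = -0.5 * np.sum(linalg.solve_triangular(L, y, lower=True) ** 2)
fit_quad = -0.5 * float(y.T @ Ki @ y)
fit_trace = -0.5 * np.sum(Ki * (y @ y.T))    # -0.5 * tr(K^{-1} Y Y^T)
assert np.allclose(fit_tri, fit_quad) and np.allclose(fit_tri, fit_trace)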
@@ -123,18 +129,15 @@ class GP(model):
        """
        return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))

    def _raw_predict(self, _Xnew, which_parts='all', full_cov=False):
    def _raw_predict(self, _Xnew, which_parts='all', full_cov=False, stop=False):
        """
        Internal helper function for making predictions; does not account
        for normalization or likelihood

        #TODO: which_parts does nothing
        """
        Kx = self.kern.K(self.X, _Xnew, which_parts=which_parts)
        mu = np.dot(np.dot(Kx.T, self.Ki), self.likelihood.Y)
        KiKx = np.dot(self.Ki, Kx)
        Kx = self.kern.K(_Xnew, self.X, which_parts=which_parts).T
        #KiKx = np.dot(self.Ki, Kx)
        KiKx, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
        mu = np.dot(KiKx.T, self.likelihood.Y)
        if full_cov:
            Kxx = self.kern.K(_Xnew, which_parts=which_parts)
            var = Kxx - np.dot(KiKx.T, Kx)
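Aside: a dense reference for the prediction path above, using `cho_solve` in place of the raw `dpotrs` call. `raw_predict_reference` is a hypothetical testing helper, and K is assumed to already include the likelihood covariance, as `_set_params` arranges:

import numpy as np
from scipy import linalg

def raw_predict_reference(K, Kx, Kxx_diag, Y):
    """Posterior mean/variance as computed in _raw_predict (diagonal case)."""
    L = np.linalg.cholesky(K)
    KiKx = linalg.cho_solve((L, True), Kx)       # K^{-1} Kx, as dpotrs computes
    mu = KiKx.T @ Y                              # Kx.T K^{-1} Y
    var = Kxx_diag - np.sum(KiKx * Kx, axis=0)   # diag(Kxx - Kx.T K^{-1} Kx)
    return mu, var[:, None]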
@@ -142,6 +145,8 @@ class GP(model):
            Kxx = self.kern.Kdiag(_Xnew, which_parts=which_parts)
            var = Kxx - np.sum(np.multiply(KiKx, Kx), 0)
            var = var[:, None]
        if stop:
            debug_this
        return mu, var
@@ -178,7 +183,8 @@ class GP(model):

    def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False):
        """
        Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
        Plot the GP's view of the world, where the data is normalized and the
        likelihood is Gaussian.

        :param samples: the number of a posteriori samples to plot
        :param which_data: which of the training data to plot (default all)
@@ -193,8 +199,8 @@ class GP(model):
        - In two dimensions, a contour plot shows the mean predicted function
        - In higher dimensions, we've not implemented this yet !TODO!

        Can plot only part of the data and part of the posterior functions using which_data and which_functions
        Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood
        Can plot only part of the data and part of the posterior functions
        using which_data and which_functions
        """
        if which_data == 'all':
            which_data = slice(None)
@@ -45,7 +45,7 @@ class GPLVM(GP):
        return np.hstack((self.X.flatten(), GP._get_params(self)))

    def _set_params(self, x):
        self.X = x[:self.X.size].reshape(self.N, self.Q).copy()
        self.X = x[:self.N*self.Q].reshape(self.N, self.Q).copy()
        GP._set_params(self, x[self.X.size:])

    def _log_likelihood_gradients(self):
@@ -12,4 +12,4 @@ from sparse_GPLVM import sparse_GPLVM
from Bayesian_GPLVM import Bayesian_GPLVM
from mrd import MRD
from generalized_FITC import generalized_FITC
#from FITC import FITC
from FITC import FITC
@@ -3,17 +3,12 @@

import numpy as np
import pylab as pb
from ..util.linalg import mdot, jitchol, tdot, symmetrify
from ..util.linalg import mdot, jitchol, tdot, symmetrify, backsub_both_sides
from ..util.plot import gpplot
from .. import kern
from GP import GP
from scipy import linalg

def backsub_both_sides(L, X):
    """ Return L^-T * X * L^-1, assuming X is symmetric and L is a lower Cholesky factor """
    tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
    return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T

class sparse_GP(GP):
    """
    Variational sparse GP model
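Aside: `backsub_both_sides` (now shared via `util.linalg`) computes L^{-T} X L^{-1} with two triangular solves instead of ever forming L^{-1}. A check of that claim with `scipy.linalg.solve_triangular`, mirroring the `dtrtrs(..., trans=1)` calls, on toy matrices:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(3)
M = 5
A = rng.standard_normal((M, M))
X = A + A.T                                      # symmetric
B = rng.standard_normal((M, M))
L = np.linalg.cholesky(B @ B.T + M * np.eye(M))  # lower Cholesky factor

Li = np.linalg.inv(L)
expected = Li.T @ X @ Li                         # L^{-T} X L^{-1}

tmp = linalg.solve_triangular(L, X, lower=True, trans='T')      # L^{-T} X
result = linalg.solve_triangular(L, tmp.T, lower=True, trans='T').T
assert np.allclose(result, expected)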
@@ -23,6 +23,7 @@ class warpedGP(GP):
        self.warping_function = TanhWarpingFunction_d(warping_terms)
        self.warping_params = (np.random.randn(self.warping_function.n_terms*3+1,) * 1)

        Y = self._scale_data(Y)
        self.has_uncertain_inputs = False
        self.Y_untransformed = Y.copy()
        self.predict_in_warped_space = False
@@ -30,6 +31,14 @@ class warpedGP(GP):

        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)

    def _scale_data(self, Y):
        self._Ymax = Y.max()
        self._Ymin = Y.min()
        return (Y - self._Ymin)/(self._Ymax - self._Ymin) - 0.5

    def _unscale_data(self, Y):
        return (Y + 0.5)*(self._Ymax - self._Ymin) + self._Ymin

    def _set_params(self, x):
        self.warping_params = x[:self.warping_function.num_parameters]
        Y = self.transform_data()
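Aside: `_scale_data` maps the targets affinely into [-0.5, 0.5] and `_unscale_data` is its exact inverse, which the prediction path below depends on. A standalone round-trip sketch of the same pair of maps:

import numpy as np

def scale_data(Y, Ymin, Ymax):
    # map Y into [-0.5, 0.5], as warpedGP._scale_data does
    return (Y - Ymin)/(Ymax - Ymin) - 0.5

def unscale_data(Ys, Ymin, Ymax):
    # exact inverse of scale_data
    return (Ys + 0.5)*(Ymax - Ymin) + Ymin

Y = np.array([[1.0], [3.0], [7.0]])
Ys = scale_data(Y, Y.min(), Y.max())
assert Ys.min() == -0.5 and Ys.max() == 0.5
assert np.allclose(unscale_data(Ys, Y.min(), Y.max()), Y)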
@@ -79,5 +88,5 @@ class warpedGP(GP):
        if self.predict_in_warped_space:
            mu = self.warping_function.f_inv(mu, self.warping_params)
            var = self.warping_function.f_inv(var, self.warping_params)

        mu = self._unscale_data(mu)
        return mu, var