Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Nicolo Fusi 2013-05-13 17:21:45 +01:00
commit ccde6c421f
19 changed files with 464 additions and 321 deletions


@@ -17,20 +17,7 @@ def backsub_both_sides(L,X):
class FITC(sparse_GP):
def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
self.Z = Z
self.M = self.Z.shape[0]
self.true_precision = likelihood.precision
#ERASEME
#N = likelihood.Y.size
#self.beta_star = np.random.rand(N,1)
#self.Kmm_ = kernel.K(self.Z).copy()
#self.Kmmi_,a,b,c = pdinv(self.Kmm_)
#self.psi1_ = kernel.K(self.Z,X).copy()
sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False)
sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=Z, X_variance=None, normalize_X=False)
def _set_params(self, p):
self.Z = p[:self.M*self.Q].reshape(self.M, self.Q)
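A side note on the parameter handling here: _set_params assumes the inducing inputs Z (an M x Q array) sit flattened at the front of the parameter vector, with kernel and likelihood parameters after them. A toy illustration of that layout (plain numpy, no GPy calls):

import numpy as np

M, Q = 4, 2
p = np.arange(M * Q + 3, dtype=np.float64)  # toy parameter vector
Z = p[:M * Q].reshape(M, Q)                 # inducing inputs come first
rest = p[M * Q:]                            # remaining (kernel/likelihood) parameters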
@@ -60,33 +47,19 @@ class FITC(sparse_GP):
#factor Kmm
self.Lm = jitchol(self.Kmm)
self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
Lmipsi1 = np.dot(self.Lmi,self.psi1)
self.Qnn = np.dot(Lmipsi1.T,Lmipsi1)
self.Qnn = np.dot(Lmipsi1.T,Lmipsi1).copy()
self.Diag0 = self.psi0 - np.diag(self.Qnn)
#TODO eraseme
"""
self.psi1 = self.psi1_
self.Lm = jitchol(self.Kmm_)
self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
Lmipsi1 = np.dot(self.Lmi,self.psi1)
#self.true_psi1 = self.kern.K(self.Z,self.X)
#self.Qnn = mdot(self.true_psi1.T,self.Lmi.T,self.Lmi,self.true_psi1)
self.Kmmi, a,b,c = pdinv(self.Kmm)
self.Qnn = mdot(self.psi1.T,self.Kmmi,self.psi1)
#self.Diag0 = self.psi0 #- np.diag(self.Qnn)
self.Diag0 = - np.diag(self.Qnn)
#Kmmi,Lm,Lmi,logdetK = pdinv(self.Kmm)
#self.Lambda = self.Kmmi_ + mdot(self.Kmmi_,self.psi1_,self.beta_star*self.psi1_.T,self.Kmmi_) + np.eye(self.M)*100
#self.Lambdai, LLm, LLmi, self.logdetLambda = pdinv(self.Lambda)
"""
#TODO uncomment
self.beta_star = self.likelihood.precision/(1. + self.likelihood.precision*self.Diag0[:,None]) #Includes Diag0 in the precision
self.V_star = self.beta_star * self.likelihood.Y
# The rather complex computations of self.A
if self.has_uncertain_inputs:
raise NotImplementedError
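A note on what this hunk computes, as a self-contained numpy sketch (the RBF kernel and shapes below are illustrative stand-ins, not GPy calls): Qnn = Knm Kmm^-1 Kmn is the Nystrom approximation to Knn, Diag0 = diag(Knn) - diag(Qnn) is FITC's diagonal correction, and beta_star folds that correction into an effective per-point noise precision.

import numpy as np
from scipy.linalg import solve_triangular

np.random.seed(0)
N, M = 10, 3
X = np.random.randn(N, 1)
Z = np.random.randn(M, 1)

def K(A, B):  # stand-in 1-d RBF kernel, in place of self.kern.K
    return np.exp(-0.5 * (A - B.T) ** 2)

Kmm = K(Z, Z) + 1e-8 * np.eye(M)
psi1 = K(Z, X)                        # M x N, matching self.psi1
psi0 = np.ones(N)                     # diag of K(X, X) for this kernel

Lm = np.linalg.cholesky(Kmm)
Lmipsi1 = solve_triangular(Lm, psi1, lower=True)
Qnn = np.dot(Lmipsi1.T, Lmipsi1)      # Knm Kmm^{-1} Kmn
Diag0 = psi0 - np.diag(Qnn)           # FITC diagonal correction, >= 0
beta = 1.0                            # homoscedastic noise precision
beta_star = beta / (1.0 + beta * Diag0)  # effective precision, as above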
@@ -94,6 +67,7 @@ class FITC(sparse_GP):
if self.likelihood.is_heteroscedastic:
assert self.likelihood.D == 1 # TODO: what if the likelihood is heteroscedastic and there are multiple independent outputs?
tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
#tmp = self.psi1 * (np.sqrt(self.true_beta_star.flatten().reshape(1, self.N))) #TODO eraseme
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
self.A = tdot(tmp)
@@ -102,19 +76,23 @@ class FITC(sparse_GP):
self.LB = jitchol(self.B)
self.LBi,info = linalg.lapack.flapack.dtrtrs(self.LB,np.eye(self.M),lower=1)
self.psi1V = np.dot(self.psi1, self.V_star)
#self.psi1V = np.dot(self.psi1, self.true_V_star) # TODO eraseme
# back-substitute C into psi1V
tmp, info1 = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)
# A:
# dlogbeta_dtheta
Kmmipsi1 = np.dot(self.Lmi.T,Lmipsi1)
b_psi1_Ki = self.beta_star * Kmmipsi1.T
Ki_pbp_Ki = np.dot(Kmmipsi1,b_psi1_Ki)
dlogB_dpsi0 = -.5*self.kern.dKdiag_dtheta(self.beta_star,X=self.X)
dlogB_dpsi1 = self.kern.dK_dtheta(b_psi1_Ki,self.X,self.Z)
dlogB_dKmm = -.5*self.kern.dK_dtheta(Ki_pbp_Ki,X=self.Z)
self.dlogbeta_dtheta = dlogB_dpsi0 + dlogB_dpsi1 + dlogB_dKmm
dA_dpsi0 = -0.5 * self.beta_star
dA_dpsi1 = b_psi1_Ki
dA_dKmm = -0.5 * np.dot(Kmmipsi1,b_psi1_Ki)
dA_dtheta_1 = self.kern.dKdiag_dtheta(dA_dpsi0,X=self.X) + self.kern.dK_dtheta(dA_dpsi1,self.X,self.Z) + self.kern.dK_dtheta(dA_dKmm,X=self.Z)
dA_dX_1 = self.kern.dK_dX(dA_dpsi1.T,self.Z,self.X) + 2. * self.kern.dK_dX(dA_dKmm,X=self.Z)
# dyby_dtheta
Kmmi = np.dot(self.Lmi.T,self.Lmi)
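The pair of dtrtrs calls above is a chained triangular back-substitution: _LBi_Lmi_psi1V = LB^-1 Lm^-1 psi1V, computed without forming any explicit inverse. A minimal sketch of the same pattern (scipy.linalg.solve_triangular is the modern equivalent of the flapack.dtrtrs calls used in this file):

import numpy as np
from scipy.linalg import solve_triangular

np.random.seed(1)
M = 4
Lm = np.tril(np.random.rand(M, M)) + M * np.eye(M)  # stand-in lower-triangular factors
LB = np.tril(np.random.rand(M, M)) + M * np.eye(M)
b = np.random.randn(M, 1)                           # stand-in for psi1V

tmp = solve_triangular(Lm, b, lower=True)           # Lm^{-1} b
x = solve_triangular(LB, tmp, lower=True)           # LB^{-1} Lm^{-1} b
assert np.allclose(np.dot(Lm, np.dot(LB, x)), b)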
@@ -123,58 +101,140 @@ class FITC(sparse_GP):
Ki_pVVp_Ki = np.dot(Kmmipsi1,VV_p_Ki)
dyby_dpsi0 = .5 * self.kern.dKdiag_dtheta(self.V_star**2,self.X)
dyby_dpsi1 = 0
dyby_dKmm = 0
dyby_dtheta = dyby_dpsi0
for psi1_n,V_n,X_n in zip(self.psi1.T,self.V_star,self.X):
dyby_dpsi1 = -V_n**2 * np.dot(psi1_n[None,:],Kmmi)
dyby_dtheta += self.kern.dK_dtheta(dyby_dpsi1,X_n[:,None],self.Z)
dA_dpsi0 = .5 * self.V_star**2
dA_dpsi0_theta = self.kern.dKdiag_dtheta(dA_dpsi0,X=self.X)
dA_dpsi1_theta = 0
dA_dpsi1_X = 0
for psi1_n,V_n,X_n in zip(self.psi1.T,self.V_star,self.X):
dA_dpsi1 = -V_n**2 * np.dot(psi1_n[None,:],Kmmi)
dA_dpsi1_theta += self.kern.dK_dtheta(dA_dpsi1,X_n[None,:],self.Z)
dA_dpsi1_X += self.kern.dK_dX(dA_dpsi1.T,self.Z,X_n[None,:])
dA_dKmm_theta = 0
dA_dKmm_X = 0
for psi1_n,V_n,X_n in zip(self.psi1.T,self.V_star,self.X):
psin_K = np.dot(psi1_n[None,:],Kmmi)
tmp = np.dot(psin_K.T,psin_K)
dyby_dKmm = .5*V_n**2 * tmp
dyby_dtheta += self.kern.dK_dtheta(dyby_dKmm,self.Z)
self.dyby_dtheta = dyby_dtheta
dA_dKmm = .5*V_n**2 * tmp
dA_dKmm_theta += self.kern.dK_dtheta(dA_dKmm,self.Z)
dA_dKmm_X += 2.*self.kern.dK_dX(dA_dKmm,self.Z)
dA_dtheta_2 = dA_dpsi0_theta + dA_dpsi1_theta + dA_dKmm_theta
dA_dX_2 = dA_dpsi1_X + dA_dKmm_X
self.dA_dtheta = dA_dtheta_1 + dA_dtheta_2
self.dA_dX = dA_dX_1 + dA_dX_2
# dlogB_dtheta : C
#C_B
dC_B = -.5*Kmmi
C_B = self.kern.dK_dtheta(dC_B,self.Z) #check
dC_dKmm = -.5*Kmmi
#C_A
LBiLmi = np.dot(self.LBi,self.Lmi)
LBL_inv = np.dot(LBiLmi.T,LBiLmi)
dC_AA = .5*LBL_inv
C_AA = self.kern.dK_dtheta(dC_AA,self.Z) #check
dC_dKmm += .5*LBL_inv
#C_AB
psi1beta = self.psi1*self.beta_star.T
dC_ABA = mdot(LBL_inv,psi1beta,Kmmipsi1.T)
C_ABA = self.kern.dK_dtheta(dC_ABA,self.Z)
dC_ABB = -np.dot(psi1beta.T,LBL_inv) #check
C_ABB = self.kern.dK_dtheta(dC_ABB,self.X,self.Z) #check
dC_dKmm += mdot(LBL_inv,psi1beta,Kmmipsi1.T)
dC_dpsi1 = -np.dot(psi1beta.T,LBL_inv)
# C_ABC
betapsi1TLmiLBi = np.dot(psi1beta.T,LBiLmi.T)
alpha = np.array([np.dot(a.T,a) for a in betapsi1TLmiLBi])[:,None]
dC_ABCA = .5 *alpha
C_ABCA = self.kern.dKdiag_dtheta(dC_ABCA,self.X) #check
dC_dpsi0 = .5 *alpha
C_ABCB = 0
_dC_dpsi1_dtheta = 0
_dC_dpsi1_dX = 0
for psi1_n,alpha_n,X_n in zip(self.psi1.T,alpha,self.X):
dC_ABCB_n = - alpha_n * np.dot(psi1_n[None,:],Kmmi)
C_ABCB += self.kern.dK_dtheta(dC_ABCB_n,X_n[:,None],self.Z) #check
_dC_dpsi1 = - alpha_n * np.dot(psi1_n[None,:],Kmmi)
_dC_dpsi1_dtheta += self.kern.dK_dtheta(_dC_dpsi1,X_n[None,:],self.Z) #check
_dC_dpsi1_dX += self.kern.dK_dX(_dC_dpsi1.T,self.Z,X_n[None,:])
C_ABCC = 0
_dC_dKmm_dtheta = 0
_dC_dKmm_dX = 0
for psi1_n,alpha_n,X_n in zip(self.psi1.T,alpha,self.X):
psin_K = np.dot(psi1_n[None,:],Kmmi)
tmp = np.dot(psin_K.T,psin_K)
dC_ABCC = .5 * alpha_n * tmp
C_ABCC += self.kern.dK_dtheta(dC_ABCC,self.Z) #check
_dC_dKmm = .5 * alpha_n * np.dot(psin_K.T,psin_K)
_dC_dKmm_dtheta += self.kern.dK_dtheta(_dC_dKmm,self.Z) #check
_dC_dKmm_dX += 2.*self.kern.dK_dX(_dC_dKmm,self.Z)
self.dlogB_dtheta = C_B + C_AA + C_ABA + C_ABB + C_ABCA + C_ABCB + C_ABCC
#self.dlogB_dtheta = dCB_dKmm_theta + dCAA_dKmm_theta + dCABA_dKmm_theta + dCABB_dpsi1_theta + dCABCA_dpsi0_theta + dCABCB_dpsi1_theta + dCABCC_dKmm_theta
self.dlogB_dtheta = self.kern.dK_dtheta(dC_dKmm,self.Z) + self.kern.dK_dtheta(dC_dpsi1,self.X,self.Z) + self.kern.dKdiag_dtheta(dC_dpsi0,self.X) + _dC_dpsi1_dtheta + _dC_dKmm_dtheta
self.dlogB_dX = 2.*self.kern.dK_dX(dC_dKmm,self.Z) + self.kern.dK_dX(dC_dpsi1.T,self.Z,self.X) + _dC_dpsi1_dX + _dC_dKmm_dX
# dD_dtheta
H = self.Kmm + mdot(self.psi1,self.beta_star*self.psi1.T)
Hi, LH, LHi, logdetH = pdinv(H)
# D_B
gamma_1 = mdot(VVT,self.psi1.T,Hi)
dD_B = gamma_1
D_B = self.kern.dK_dtheta(dD_B,self.X,self.Z)
dD_dpsi1 = gamma_1
# D_C
dD_CA = -.5 * mdot(Hi,self.psi1,gamma_1)
D_CA = self.kern.dK_dtheta(dD_CA,self.Z)
dD_dKmm = -.5 * mdot(Hi,self.psi1,gamma_1)
# D_CB
dD_CBA = - mdot(psi1beta.T,Hi,self.psi1,gamma_1)
D_CBA = self.kern.dK_dtheta(dD_CBA,self.X,self.Z)
dD_dpsi1 += -mdot(psi1beta.T,Hi,self.psi1,gamma_1)
# D_CBB
pHip = mdot(self.psi1.T,Hi,self.psi1)
gamma_2 = mdot(self.beta_star*pHip,self.V_star)
D_CBBA = .5 * self.kern.dKdiag_dtheta(gamma_2**2,self.X)
dD_dpsi0 = 0.5*mdot(self.beta_star*pHip,self.V_star)**2
_dD_dpsi1_dtheta_1 = 0
_dD_dpsi1_dX_1 = 0
for psi1_n,gamma_n,X_n in zip(self.psi1.T,gamma_2,self.X):
_dD_dpsi1 = - gamma_n**2 * np.dot(psi1_n[None,:],Kmmi)
_dD_dpsi1_dtheta_1 += self.kern.dK_dtheta(_dD_dpsi1,X_n[None,:],self.Z)
_dD_dpsi1_dX_1 += self.kern.dK_dX(_dD_dpsi1.T,self.Z,X_n[None,:])
_dD_dKmm_dtheta_1 = 0
_dD_dKmm_dX_1 = 0
for psi1_n,gamma_n,X_n in zip(self.psi1.T,gamma_2,self.X):
psin_K = np.dot(psi1_n[None,:],Kmmi)
_dD_dKmm = .5*gamma_n**2 * np.dot(psin_K.T,psin_K)
_dD_dKmm_dtheta_1 += self.kern.dK_dtheta(_dD_dKmm,self.Z)
_dD_dKmm_dX_1 += 2.*self.kern.dK_dX(_dD_dKmm,self.Z)
# D_A
gamma_3 = self.V_star * mdot(self.V_star.T,pHip*self.beta_star).T
dD_AA = - gamma_3
D_AA = self.kern.dKdiag_dtheta(dD_AA,self.X)
dD_dpsi0 += -self.V_star * mdot(self.V_star.T,pHip*self.beta_star).T
_dD_dpsi1_dtheta_2 = 0
_dD_dpsi1_dX_2 = 0
for psi1_n,gamma_n,X_n in zip(self.psi1.T,gamma_3,self.X):
_dD_dpsi = 2. * gamma_n * np.dot(psi1_n[None,:],Kmmi)
_dD_dpsi1_dtheta_2 += self.kern.dK_dtheta(_dD_dpsi,X_n[None,:],self.Z)
_dD_dpsi1_dX_2 += self.kern.dK_dX(_dD_dpsi.T,self.Z,X_n[None,:])
_dD_dKmm_dtheta_2 = 0
_dD_dKmm_dX_2 = 0
for psi1_n,gamma_n,X_n in zip(self.psi1.T,gamma_3,self.X):
psin_K = np.dot(psi1_n[None,:],Kmmi)
tmp = np.dot(psin_K.T,psin_K)
dD_AC = - gamma_n * tmp
_dD_dKmm_dtheta_2 += self.kern.dK_dtheta(dD_AC,self.Z)
_dD_dKmm_dX_2 += 2.*self.kern.dK_dX(dD_AC,self.Z)
self.dD_dtheta = D_AA + _dD_dpsi1_dtheta_2 + _dD_dKmm_dtheta_2 + D_B + D_CA + D_CBA + D_CBBA + _dD_dpsi1_dtheta_1 + _dD_dKmm_dtheta_1
self.dD_dtheta = self.kern.dKdiag_dtheta(dD_dpsi0,self.X) + self.kern.dK_dtheta(dD_dKmm,self.Z) + self.kern.dK_dtheta(dD_dpsi1.T,self.Z,self.X) + _dD_dpsi1_dtheta_2 + _dD_dKmm_dtheta_2 + _dD_dpsi1_dtheta_1 + _dD_dKmm_dtheta_1
self.dD_dX = 2.*self.kern.dK_dX(dD_dKmm,self.Z) + self.kern.dK_dX(dD_dpsi1.T,self.Z,self.X) + _dD_dpsi1_dX_2 + _dD_dKmm_dX_2 + _dD_dpsi1_dX_1 + _dD_dKmm_dX_1
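A recurring pattern in this hunk is the factor of two in terms like 2.*self.kern.dK_dX(dA_dKmm, self.Z): Kmm = K(Z, Z) depends on Z through both arguments, and because the multiplier matrices here are symmetric the two contributions coincide (the same point appears later as the "vertical and horizontal 'stripes'" comment in dL_dZ). In LaTeX, for symmetric G:

\frac{\partial}{\partial Z} \operatorname{tr}\big( G\, K(Z, Z) \big)
  = 2\, \frac{\partial}{\partial A} \operatorname{tr}\big( G\, K(A, Z) \big) \Big|_{A = Z},
  \qquad G = G^\top .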
# the partial derivative vector for the likelihood
@@ -185,64 +245,120 @@ class FITC(sparse_GP):
raise NotImplementedError, "heteroscedatic derivates not implemented"
else:
# likelihood is not heteroscedastic
self.partial_for_likelihood = 0 #FIXME
#self.partial_for_likelihood = -0.5 * self.N * self.D * self.likelihood.precision + 0.5 * self.likelihood.trYYT * self.likelihood.precision ** 2
#self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision)
#self.partial_for_likelihood += self.likelihood.precision * (0.5 * np.sum(self.A * self.DBi_plus_BiPBi) - np.sum(np.square(self._LBi_Lmi_psi1V)))
dbstar_dnoise = self.likelihood.precision * (self.beta_star**2 * self.Diag0[:,None] - self.beta_star) #check
#dbstar_dnoise = self.likelihood.precision * (self.true_beta_star**2 * self.Diag0[:,None] - self.true_beta_star) #TODO erase
Lmi_psi1 = mdot(self.Lmi,self.psi1)
LBiLmipsi1 = np.dot(self.LBi,Lmi_psi1)
aux_0 = np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
aux_1 = self.likelihood.Y.T * np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
aux_2 = np.dot(LBiLmipsi1.T,self._LBi_Lmi_psi1V)
dA_dnoise = 0.5 * self.D * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.D * np.sum(self.likelihood.Y**2 * dbstar_dnoise) # check
dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T) #check
dD_dnoise_1 = mdot(self.V_star*LBiLmipsi1.T,LBiLmipsi1*dbstar_dnoise.T*self.likelihood.Y.T) #check
#dD_dnoise_1 = mdot(self.true_V_star*LBiLmipsi1.T,LBiLmipsi1*dbstar_dnoise.T*self.likelihood.Y.T) #TODO eraseme
alpha = mdot(LBiLmipsi1,self.V_star)
alpha_ = mdot(LBiLmipsi1.T,alpha)
dD_dnoise_2 = -0.5 * self.D * np.sum(alpha_**2 * dbstar_dnoise ) #check
dD_dnoise = dD_dnoise_1 + dD_dnoise_2
self.partial_for_likelihood = dA_dnoise + dC_dnoise + dD_dnoise
def log_likelihood(self):
""" Compute the (lower bound on the) log marginal likelihood """
A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
C = -self.D * (np.sum(np.log(np.diag(self.LB))))
"""
A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
#B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
C = -self.D * (np.sum(np.log(np.diag(self.LB))))
D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
return A + C + D # +B
"""
return A+C
return A + C + D
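For reference, the A + C + D decomposition above matches the collapsed FITC log marginal likelihood written in this file's notation (a hedged reconstruction; the B term is commented out in the code):

\log p(\mathbf{y}) =
  \underbrace{-\tfrac{ND}{2}\log 2\pi + \tfrac{1}{2}\textstyle\sum_n \log \beta^*_n - \tfrac{1}{2}\,\mathbf{y}^\top V^*}_{A}
  \underbrace{-\, D \textstyle\sum_m \log (L_B)_{mm}}_{C}
  + \underbrace{\tfrac{1}{2}\big\lVert L_B^{-1} L_m^{-1} \Psi_1 V^* \big\rVert^2}_{D},
  \qquad V^* = \beta^* \odot \mathbf{y}.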
def _log_likelihood_gradients(self):
pass
return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))
def dL_dtheta(self):
#dL_dtheta = self.dlogB_dtheta
#dL_dtheta = self.dyby_dtheta
#dL_dtheta = self.dlogbeta_dtheta + self.dyby_dtheta
dL_dtheta = self.dlogB_dtheta
dL_dtheta = self.dlogbeta_dtheta + self.dyby_dtheta + self.dlogB_dtheta
"""
dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z)
if self.has_uncertain_inputs:
dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z, self.X, self.X_variance)
dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1.T, self.Z, self.X, self.X_variance)
dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2, self.Z, self.X, self.X_variance)
raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
else:
dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.Z, self.X)
dL_dtheta += self.kern.dKdiag_dtheta(self.dL_dpsi0, self.X)
"""
#dL_dtheta = self.dlogbeta_dtheta + self.dyby_dtheta #+ self.dlogB_dtheta + self.dD_dtheta
dL_dtheta = self.dA_dtheta + self.dlogB_dtheta + self.dD_dtheta
return dL_dtheta
def dL_dZ(self):
dL_dZ = np.zeros(self.M)
"""
dL_dZ = 2.*self.kern.dK_dX(self.dL_dKmm, self.Z) # factor of two because of vertical and horizontal 'stripes' in dKmm_dZ
if self.has_uncertain_inputs:
dL_dZ += self.kern.dpsi1_dZ(self.dL_dpsi1, self.Z, self.X, self.X_variance)
dL_dZ += self.kern.dpsi2_dZ(self.dL_dpsi2, self.Z, self.X, self.X_variance)
raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
else:
dL_dZ += self.kern.dK_dX(self.dL_dpsi1, self.Z, self.X)
"""
dL_dZ = self.dA_dX + self.dlogB_dX + self.dD_dX
return dL_dZ
def _raw_predict(self, Xnew, which_parts, full_cov=False):
"""
Make a prediction for the generalized FITC model.
Arguments
---------
X : Input prediction data - Nx1 numpy array (floats)
"""
Iplus_Dprod_i = 1./(1.+ self.Diag0 * self.likelihood.precision.flatten())
self.Diag = self.Diag0 * Iplus_Dprod_i
self.P = Iplus_Dprod_i[:,None] * self.psi1.T
self.RPT0 = np.dot(self.Lmi,self.psi1)
self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
self.R,info = linalg.lapack.flapack.dtrtrs(self.L,self.Lmi,lower=1)
self.RPT = np.dot(self.R,self.P.T)
self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
self.w = self.Diag * self.likelihood.v_tilde
self.gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
self.mu = self.w + np.dot(self.P,self.gamma)
if self.likelihood.is_heteroscedastic:
# q(u|f) = N(u| R0i*mu_u*f, R0i*C*R0i.T)
# Ci = I + (RPT0)Di(RPT0).T
# C = I - [RPT0] * (D+[RPT0].T*[RPT0])^-1*[RPT0].T
# = I - [RPT0] * (D + self.Qnn)^-1 * [RPT0].T
# = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
# = I - V.T * V
U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
V,info = linalg.lapack.flapack.dtrtrs(U,self.RPT0.T,lower=1)
C = np.eye(self.M) - np.dot(V.T,V)
mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
#self.C = C
#self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T
#self.mu_u = mu_u
#self.U = U
# q(u|y) = N(u| R0i*mu_H,R0i*Sigma_H*R0i.T)
mu_H = np.dot(mu_u,self.mu)
self.mu_H = mu_H
Sigma_H = C + np.dot(mu_u,np.dot(self.Sigma,mu_u.T))
# q(f_star|y) = N(f_star|mu_star,sigma2_star)
Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
KR0T = np.dot(Kx.T,self.Lmi.T)
mu_star = np.dot(KR0T,mu_H)
if full_cov:
Kxx = self.kern.K(Xnew,which_parts=which_parts)
var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T))
else:
Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts)
Kxx_ = self.kern.K(Xnew,which_parts=which_parts) # TODO: RA, is this line needed?
var_ = Kxx_ + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T)) # TODO: RA, is this line needed?
var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.M),KR0T.T),0))[:,None]
return mu_star[:,None],var
else:
raise NotImplementedError, "homoscedastic fitc not implemented"
"""
Kx = self.kern.K(self.Z, Xnew)
mu = mdot(Kx.T, self.C/self.scale_factor, self.psi1V)
if full_cov:
Kxx = self.kern.K(Xnew)
var = Kxx - mdot(Kx.T, (self.Kmmi - self.C/self.scale_factor**2), Kx) #NOTE this won't work for plotting
else:
Kxx = self.kern.Kdiag(Xnew)
var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.C/self.scale_factor**2, Kx),0)
return mu,var[:,None]
"""


@@ -3,10 +3,11 @@
import numpy as np
from scipy import linalg
import pylab as pb
from .. import kern
from ..core import model
from ..util.linalg import pdinv, mdot
from ..util.linalg import pdinv, mdot, tdot
from ..util.plot import gpplot, x_frame1D, x_frame2D, Tango
from ..likelihoods import EP
@@ -58,13 +59,12 @@ class GP(model):
"""
TODO: one day we might like to learn Z by gradient methods?
"""
#FIXME: this doesn't live here.
return np.zeros_like(self.Z)
def _set_params(self, p):
self.kern._set_params_transformed(p[:self.kern.Nparam_transformed()])
# self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
self.likelihood._set_params(p[self.kern.Nparam_transformed():])
self.K = self.kern.K(self.X)
self.K += self.likelihood.covariance_matrix
@@ -73,10 +73,14 @@ class GP(model):
# the gradient of the likelihood wrt the covariance matrix
if self.likelihood.YYT is None:
alpha = np.dot(self.Ki, self.likelihood.Y)
self.dL_dK = 0.5 * (np.dot(alpha, alpha.T) - self.D * self.Ki)
#alpha = np.dot(self.Ki, self.likelihood.Y)
alpha,_ = linalg.lapack.flapack.dpotrs(self.L, self.likelihood.Y,lower=1)
self.dL_dK = 0.5 * (tdot(alpha) - self.D * self.Ki)
else:
tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
#tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
self.dL_dK = 0.5 * (tmp - self.D * self.Ki)
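The change above replaces explicit products with Ki by LAPACK solves from the Cholesky factor: given K = L L^T, dpotrs solves K x = Y directly, which is cheaper and better conditioned than forming K^-1. A small sketch (scipy.linalg.lapack.dpotrs is the modern spelling of the linalg.lapack.flapack.dpotrs used here; tdot(alpha) in GPy computes np.dot(alpha, alpha.T)):

import numpy as np
from scipy.linalg import lapack

np.random.seed(3)
A = np.random.randn(5, 5)
K = np.dot(A, A.T) + 5.0 * np.eye(5)  # SPD stand-in for the covariance
Y = np.random.randn(5, 2)

L = np.linalg.cholesky(K)
alpha, info = lapack.dpotrs(L, Y, lower=1)  # solves K alpha = Y from L
assert info == 0
assert np.allclose(np.dot(K, alpha), Y)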
def _get_params(self):
@@ -100,7 +104,9 @@ class GP(model):
Computes the model fit using YYT if it's available
"""
if self.likelihood.YYT is None:
return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
tmp, _ = linalg.lapack.flapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
return -0.5 * np.sum(np.square(tmp))
#return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
else:
return -0.5 * np.sum(np.multiply(self.Ki, self.likelihood.YYT))
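Both branches compute the same data-fit term; the triangular-solve form follows directly from the Cholesky factorisation:

-\tfrac{1}{2}\operatorname{tr}\big(K^{-1} Y Y^\top\big)
  = -\tfrac{1}{2}\big\lVert L^{-1} Y \big\rVert_F^2,
  \qquad K = L L^\top .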
@@ -123,18 +129,15 @@ class GP(model):
"""
return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
def _raw_predict(self, _Xnew, which_parts='all', full_cov=False):
def _raw_predict(self, _Xnew, which_parts='all', full_cov=False,stop=False):
"""
Internal helper function for making predictions, does not account
for normalization or likelihood
#TODO: which_parts does nothing
"""
Kx = self.kern.K(self.X, _Xnew,which_parts=which_parts)
mu = np.dot(np.dot(Kx.T, self.Ki), self.likelihood.Y)
KiKx = np.dot(self.Ki, Kx)
Kx = self.kern.K(_Xnew,self.X,which_parts=which_parts).T
#KiKx = np.dot(self.Ki, Kx)
KiKx, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
mu = np.dot(KiKx.T, self.likelihood.Y)
if full_cov:
Kxx = self.kern.K(_Xnew, which_parts=which_parts)
var = Kxx - np.dot(KiKx.T, Kx)
@@ -142,6 +145,8 @@ class GP(model):
Kxx = self.kern.Kdiag(_Xnew, which_parts=which_parts)
var = Kxx - np.sum(np.multiply(KiKx, Kx), 0)
var = var[:, None]
if stop:
debug_this # undefined name, apparently deliberate: raises NameError as a crude breakpoint when stop=True
return mu, var
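For orientation, this hunk assembles the standard GP predictive equations, with KiKx = K^-1 K_x* now obtained by dpotrs from the Cholesky factor instead of via an explicit inverse:

\mu_* = K_{*x} K^{-1} \mathbf{y},
  \qquad \operatorname{var}_* = K_{**} - K_{*x} K^{-1} K_{x*} .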
@@ -178,7 +183,8 @@ class GP(model):
def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False):
"""
Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
Plot the GP's view of the world, where the data is normalized and the
likelihood is Gaussian.
:param samples: the number of a posteriori samples to plot
:param which_data: which of the training data to plot (default all)
@@ -193,8 +199,8 @@ class GP(model):
- In two dimensions, a contour plot shows the mean predicted function
- In higher dimensions, we've not implemented this yet !TODO!
Can plot only part of the data and part of the posterior functions using which_data and which_functions
Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood
Can plot only part of the data and part of the posterior functions
using which_data and which_functions
"""
if which_data == 'all':
which_data = slice(None)


@@ -12,4 +12,4 @@ from sparse_GPLVM import sparse_GPLVM
from Bayesian_GPLVM import Bayesian_GPLVM
from mrd import MRD
from generalized_FITC import generalized_FITC
#from FITC import FITC
from FITC import FITC
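With the last line re-enabled, the class is exported from the package again, so (assuming this diff's GPy/models layout) user code can do:

from GPy.models import FITC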