Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-09 20:12:38 +02:00
Sparse GP with EP is working now
This commit is contained in:
parent f2ce47d96e
commit c6f2082839
4 changed files with 78 additions and 46 deletions
@@ -17,7 +17,7 @@ class EP(likelihood):
         self.epsilon = epsilon
         self.eta, self.delta = power_ep
         self.data = data
-        self.N = self.data.size
+        self.N, self.D = self.data.shape
         self.is_heteroscedastic = True
         self.Nparams = 0

@@ -29,7 +29,7 @@ class EP(likelihood):
         #initial values for the GP variables
         self.Y = np.zeros((self.N,1))
         self.covariance_matrix = np.eye(self.N)
-        self.precision = np.ones(self.N)
+        self.precision = np.ones(self.N)[:,None]
         self.Z = 0
         self.YYT = None

@@ -54,18 +54,14 @@ class EP(likelihood):

         self.Y = mu_tilde[:,None]
         self.YYT = np.dot(self.Y,self.Y.T)
-        self.precision = self.tau_tilde
-        self.covariance_matrix = np.diag(1./self.precision)
+        self.covariance_matrix = np.diag(1./self.tau_tilde)
+        self.precision = self.tau_tilde[:,None]

     def fit_full(self,K):
         """
         The expectation-propagation algorithm.
         For nomenclature see Rasmussen & Williams 2006.
         """
         #Prior distribution parameters: p(f|X) = N(f|0,K)

         self.tau_tilde = np.zeros(self.N)
         self.v_tilde = np.zeros(self.N)
         #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
         mu = np.zeros(self.N)
         Sigma = K.copy()

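For orientation, fit_full implements standard full-rank EP on the prior N(0, K) (Rasmussen & Williams 2006, Algorithm 3.5). A minimal self-contained sketch of that loop; the function name and the rank-one Sigma update are assumed rather than copied from this commit, and moments_match has the signature used in the hunks below:

    import numpy as np

    def ep_fit_full_sketch(K, y, moments_match, epsilon=1e-6):
        # Full EP on a GP prior N(0, K) -- a sketch, not GPy's exact code.
        N = K.shape[0]
        tau_tilde, v_tilde = np.zeros(N), np.zeros(N)   # site natural parameters
        mu, Sigma = np.zeros(N), K.copy()               # posterior q(f) = N(mu, Sigma)
        change = np.inf
        while change > epsilon:
            old = tau_tilde.copy()
            for i in np.random.permutation(N):
                # cavity: remove site i from the i-th marginal
                tau_c = 1./Sigma[i, i] - tau_tilde[i]
                v_c = mu[i]/Sigma[i, i] - v_tilde[i]
                # moment matching against the exact likelihood term
                Z_hat, mu_hat, sigma2_hat = moments_match(y[i], tau_c, v_c)
                d_tau = 1./sigma2_hat - 1./Sigma[i, i]
                d_v = mu_hat/sigma2_hat - mu[i]/Sigma[i, i]
                tau_tilde[i] += d_tau
                v_tilde[i] += d_v
                # rank-one update of the posterior covariance
                si = Sigma[:, i].copy()
                Sigma -= d_tau/(1. + d_tau*Sigma[i, i]) * np.outer(si, si)
                mu = np.dot(Sigma, v_tilde)             # zero-mean prior: mu = Sigma v~
            change = np.sum((tau_tilde - old)**2)/N
        return mu, Sigma, tau_tilde, v_tilde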
@@ -124,13 +120,14 @@ class EP(likelihood):

         return self._compute_GP_variables()

-    def fit_DTC(self, Knn_diag, Kmn, Kmm):
+    #def fit_DTC(self, Knn_diag, Kmn, Kmm):
+    def fit_DTC(self, Kmm, Kmn):
         """
         The expectation-propagation algorithm with sparse pseudo-input.
         For nomenclature see ... 2013.
         """
-        #TODO: this doesn;t work with uncertain inputs!
+        #TODO: this doesn't work with uncertain inputs!

         """
         Prior approximation parameters:

@@ -158,12 +155,12 @@ class EP(likelihood):
         sigma_ = 1./tau_
         mu_ = v_/tau_
         """
-        tau_ = np.empty(self.N,dtype=float)
-        v_ = np.empty(self.N,dtype=float)
+        self.tau_ = np.empty(self.N,dtype=float)
+        self.v_ = np.empty(self.N,dtype=float)

         #Initial values - Marginal moments
         z = np.empty(self.N,dtype=float)
-        Z_hat = np.empty(self.N,dtype=float)
+        self.Z_hat = np.empty(self.N,dtype=float)
         phi = np.empty(self.N,dtype=float)
         mu_hat = np.empty(self.N,dtype=float)
         sigma2_hat = np.empty(self.N,dtype=float)

@@ -172,21 +169,21 @@ class EP(likelihood):
         epsilon_np1 = 1
         epsilon_np2 = 1
         self.iterations = 0
-        np1 = [tau_tilde.copy()]
-        np2 = [v_tilde.copy()]
+        np1 = [self.tau_tilde.copy()]
+        np2 = [self.v_tilde.copy()]
         while epsilon_np1 > self.epsilon or epsilon_np2 > self.epsilon:
             update_order = np.random.permutation(self.N)
             for i in update_order:
                 #Cavity distribution parameters
-                tau_[i] = 1./Sigma_diag[i] - self.eta*tau_tilde[i]
-                v_[i] = mu[i]/Sigma_diag[i] - self.eta*v_tilde[i]
+                self.tau_[i] = 1./Sigma_diag[i] - self.eta*self.tau_tilde[i]
+                self.v_[i] = mu[i]/Sigma_diag[i] - self.eta*self.v_tilde[i]
                 #Marginal moments
-                Z_hat[i], mu_hat[i], sigma2_hat[i] = self.likelihood_function.moments_match(self.data[i],tau_[i],v_[i])
+                self.Z_hat[i], mu_hat[i], sigma2_hat[i] = self.likelihood_function.moments_match(self.data[i],self.tau_[i],self.v_[i])
                 #Site parameters update
-                Delta_tau = delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])
+                Delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])
                 Delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma_diag[i])
-                tau_tilde[i] = tau_tilde[i] + Delta_tau
-                v_tilde[i] = v_tilde[i] + Delta_v
+                self.tau_tilde[i] = self.tau_tilde[i] + Delta_tau
+                self.v_tilde[i] = self.v_tilde[i] + Delta_v
                 #Posterior distribution parameters update
                 LLT = LLT + np.outer(Kmn[:,i],Kmn[:,i])*Delta_tau
                 L = jitchol(LLT)

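The loop body above is the power-EP generalisation of the usual site update: eta is the EP power and delta a damping/step-size factor, both unpacked from power_ep in __init__ (first hunk). Pulled out of the class, one site update reads as follows; a sketch, with moments_match assumed to have the signature used above:

    def ep_site_update(mu_i, Sigma_ii, tau_t, v_t, y_i, moments_match, eta=1., delta=1.):
        # One power-EP site update, mirroring the loop body in the diff.
        # Cavity distribution (natural parameters): remove eta times the site
        tau_c = 1./Sigma_ii - eta*tau_t
        v_c = mu_i/Sigma_ii - eta*v_t
        # Moments of the tilted distribution: cavity times likelihood**eta
        Z_hat, mu_hat, sigma2_hat = moments_match(y_i, tau_c, v_c)
        # Damped change in the site's natural parameters
        Delta_tau = delta/eta*(1./sigma2_hat - 1./Sigma_ii)
        Delta_v = delta/eta*(mu_hat/sigma2_hat - mu_i/Sigma_ii)
        return tau_t + Delta_tau, v_t + Delta_v, Delta_tau, Delta_v

With eta = delta = 1 this reduces to the classical EP update.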
@@ -196,25 +193,26 @@ class EP(likelihood):
                 mu = mu + (Delta_v-Delta_tau*mu[i])*si
             self.iterations += 1
             #Sigma recomputation with Cholesky decompositon
-            LLT0 = LLT0 + np.dot(Kmn*tau_tilde[None,:],Kmn.T)
+            LLT0 = LLT0 + np.dot(Kmn*self.tau_tilde[None,:],Kmn.T)
             L = jitchol(LLT)
             V,info = linalg.flapack.dtrtrs(L,Kmn,lower=1)
             V2,info = linalg.flapack.dtrtrs(L.T,V,lower=0)
             Sigma_diag = np.sum(V*V,-2)
-            Knmv_tilde = np.dot(Kmn,v_tilde)
+            Knmv_tilde = np.dot(Kmn,self.v_tilde)
             mu = np.dot(V2.T,Knmv_tilde)
-            epsilon_np1 = sum((tau_tilde-np1[-1])**2)/self.N
-            epsilon_np2 = sum((v_tilde-np2[-1])**2)/self.N
-            np1.append(tau_tilde.copy())
-            np2.append(v_tilde.copy())
+            epsilon_np1 = sum((self.tau_tilde-np1[-1])**2)/self.N
+            epsilon_np2 = sum((self.v_tilde-np2[-1])**2)/self.N
+            np1.append(self.tau_tilde.copy())
+            np2.append(self.v_tilde.copy())

         self._compute_GP_variables()

-    def fit_FITC(self, Knn_diag, Kmn):
+    def fit_FITC(self, Kmm, Kmn, Knn_diag):
         """
         The expectation-propagation algorithm with sparse pseudo-input.
         For nomenclature see Naish-Guzman and Holden, 2008.
         """
+        M = Kmm.shape[0]

         """
         Prior approximation parameters:

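What the recomputation block is doing: assuming LLT0 starts as Kmm, L is the Cholesky factor of Kmm + Kmn T~ Knm, so Sigma_diag picks out diag(Knm (Kmm + Kmn T~ Knm)^-1 Kmn) and mu = V2.T Kmn v~ is Sigma v~. By the Woodbury identity this is exactly the EP posterior under the DTC prior Qnn = Knm Kmm^-1 Kmn. A small numerical check of that identity on toy matrices (all names local to the check):

    import numpy as np

    rng = np.random.RandomState(0)
    N, M = 6, 3
    Kmn = rng.randn(M, N)
    A = rng.randn(M, M)
    Kmm = np.dot(A, A.T) + np.eye(M)           # make Kmm positive definite
    tau_tilde = np.abs(rng.randn(N))           # site precisions T~
    Q = np.dot(Kmn.T, np.linalg.solve(Kmm, Kmn))          # DTC prior covariance
    lhs = Q - np.dot(Q, np.linalg.solve(Q + np.diag(1./tau_tilde), Q))
    rhs = np.dot(Kmn.T, np.linalg.solve(Kmm + np.dot(Kmn*tau_tilde, Kmn.T), Kmn))
    print(np.allclose(lhs, rhs))               # True: both are the posterior Sigma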
@@ -235,7 +233,7 @@ class EP(likelihood):
         mu = w + P*gamma
         """
         self.w = np.zeros(self.N)
-        self.gamma = np.zeros(self.M)
+        self.gamma = np.zeros(M)
         mu = np.zeros(self.N)
         P = P0.copy()
         R = R0.copy()

@@ -271,7 +269,7 @@ class EP(likelihood):
                 self.tau_[i] = 1./Sigma_diag[i] - self.eta*self.tau_tilde[i]
                 self.v_[i] = mu[i]/Sigma_diag[i] - self.eta*self.v_tilde[i]
                 #Marginal moments
-                self.Z_hat[i], mu_hat[i], sigma2_hat[i] = self.likelihood_function.moments_match(data[i],self.tau_[i],self.v_[i])
+                self.Z_hat[i], mu_hat[i], sigma2_hat[i] = self.likelihood_function.moments_match(self.data[i],self.tau_[i],self.v_[i])
                 #Site parameters update
                 Delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])
                 Delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma_diag[i])

@@ -281,10 +279,10 @@ class EP(likelihood):
                 dtd1 = Delta_tau*Diag[i] + 1.
                 dii = Diag[i]
                 Diag[i] = dii - (Delta_tau * dii**2.)/dtd1
-                pi_ = P[i,:].reshape(1,self.M)
+                pi_ = P[i,:].reshape(1,M)
                 P[i,:] = pi_ - (Delta_tau*dii)/dtd1 * pi_
                 Rp_i = np.dot(R,pi_.T)
-                RTR = np.dot(R.T,np.dot(np.eye(self.M) - Delta_tau/(1.+Delta_tau*Sigma_diag[i]) * np.dot(Rp_i,Rp_i.T),R))
+                RTR = np.dot(R.T,np.dot(np.eye(M) - Delta_tau/(1.+Delta_tau*Sigma_diag[i]) * np.dot(Rp_i,Rp_i.T),R))
                 R = jitchol(RTR).T
                 self.w[i] = self.w[i] + (Delta_v - Delta_tau*self.w[i])*dii/dtd1
                 self.gamma = self.gamma + (Delta_v - Delta_tau*mu[i])*np.dot(RTR,P[i,:].T)

@@ -296,7 +294,7 @@ class EP(likelihood):
             Diag = Diag0/(1.+ Diag0 * self.tau_tilde)
             P = (Diag / Diag0)[:,None] * P0
             RPT0 = np.dot(R0,P0.T)
-            L = jitchol(np.eye(self.M) + np.dot(RPT0,(1./Diag0 - Diag/(Diag0**2))[:,None]*RPT0.T))
+            L = jitchol(np.eye(M) + np.dot(RPT0,(1./Diag0 - Diag/(Diag0**2))[:,None]*RPT0.T))
             R,info = linalg.flapack.dtrtrs(L,R0,lower=1)
             RPT = np.dot(R,P.T)
             Sigma_diag = Diag + np.sum(RPT.T*RPT.T,-1)

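Throughout fit_FITC the posterior is carried in the low-rank form sketched in the docstring fragment above: Sigma = Diag + P R.T R P.T and mu = w + P*gamma, so only O(NM) storage is needed. Reading the posterior marginals out of that representation looks like this; a toy sketch with all shapes assumed:

    import numpy as np

    rng = np.random.RandomState(0)
    N, M = 5, 3
    Diag = np.abs(rng.randn(N)) + 1.          # diagonal part of Sigma
    P = rng.randn(N, M)                       # N x M factor
    R = rng.randn(M, M)                       # M x M factor; only R.T R matters
    w = rng.randn(N)
    gamma = rng.randn(M)

    RPT = np.dot(R, P.T)                      # M x N, as in the diff
    Sigma_diag = Diag + np.sum(RPT*RPT, 0)    # diag(Diag) + diag(P R.T R P.T)
    mu = w + np.dot(P, gamma)                 # posterior mean, mu = w + P*gamma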
@@ -37,8 +37,8 @@ class probit(likelihood_function):
         :param tau_i: precision of the cavity distribution (float)
         :param v_i: mean/variance of the cavity distribution (float)
         """
-        # TODO: some version of assert np.sum(np.abs(Y)-1) == 0, "Output values must be either -1 or 1"
-        if data_i == 0: data_i = -1 #NOTE Binary classification works better classes {-1,1}, 1D-plotting works better with classes {0,1}.
+        if data_i == 0: data_i = -1 #NOTE Binary classification algorithm works better with classes {-1,1}, 1D-plotting works better with classes {0,1}.
+        # TODO: some version of assert
         z = data_i*v_i/np.sqrt(tau_i**2 + tau_i)
         Z_hat = stats.norm.cdf(z)
         phi = stats.norm.pdf(z)

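For the probit likelihood the tilted-distribution moments are available in closed form (Rasmussen & Williams 2006, sec. 3.6); the z and Z_hat lines above match the standard expressions. A sketch of the full moment computation; the mu_hat/sigma2_hat lines are the textbook formulas and are assumed, not copied from this commit:

    import numpy as np
    from scipy import stats

    def probit_moments(data_i, tau_i, v_i):
        # Moments of N(f|mu_c, s2_c) * Phi(data_i * f), with data_i in {-1, +1}
        s2_c = 1./tau_i                             # cavity variance
        mu_c = v_i/tau_i                            # cavity mean
        z = data_i*v_i/np.sqrt(tau_i**2 + tau_i)    # = data_i*mu_c/sqrt(1 + s2_c)
        Z_hat = stats.norm.cdf(z)                   # tilted normalizer
        ratio = stats.norm.pdf(z)/Z_hat
        mu_hat = mu_c + data_i*s2_c*ratio/np.sqrt(1. + s2_c)
        sigma2_hat = s2_c - s2_c**2*ratio*(z + ratio)/(1. + s2_c)
        return Z_hat, mu_hat, sigma2_hat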
@@ -82,9 +82,9 @@ class sparse_GP(GP):
         if self.likelihood.is_heteroscedastic:
             assert self.likelihood.D == 1 #TODO: what is the likelihood is heterscedatic and there are multiple independent outputs?
             if self.has_uncertain_inputs:
-                self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision.reshape(self.N,1,1)/sf2)).sum(0)
+                self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision.flatten().reshape(self.N,1,1)/sf2)).sum(0)
             else:
-                tmp = self.psi1.T*(np.sqrt(self.likelihood.precision.reshape(1,self.N))/sf)
+                tmp = self.psi1*(np.sqrt(self.likelihood.precision.flatten().reshape(1,self.N))/sf)
                 self.psi2_beta_scaled = np.dot(tmp,tmp.T)
         else:
             if self.has_uncertain_inputs:

@@ -107,14 +107,18 @@ class sparse_GP(GP):
         self.E = mdot(self.C, self.psi1VVpsi1/sf2, self.C.T)

         # Compute dL_dpsi # FIXME: this is untested for the het. case
-        self.dL_dpsi0 = - 0.5 * self.D * self.likelihood.precision * np.ones(self.N)
+        self.dL_dpsi0 = - 0.5 * self.D * (self.likelihood.precision * np.ones([self.N,1])).flatten()
         self.dL_dpsi1 = mdot(self.V, self.psi1V.T,self.C).T
         if self.likelihood.is_heteroscedastic:
-            self.dL_dpsi2 = 0.5 * self.likelihood.precision[:,None,None] * self.D * self.Kmmi[None,:,:] # dB
-            self.dL_dpsi2 += - 0.5 * self.likelihood.precision[:,None,None]/sf2 * self.D * self.C[None,:,:] # dC
-            self.dL_dpsi2 += - 0.5 * self.likelihood.precision[:,None,None]* self.E[None,:,:] # dD
-            if not self.has_uncertain_inputs:
-                raise NotImplementedError, "TODO: recaste derivatibes in psi2 back into psi1"
+            if self.has_uncertain_inputs:
+                self.dL_dpsi2 = 0.5 * self.likelihood.precision[:,None,None] * self.D * self.Kmmi[None,:,:] # dB
+                self.dL_dpsi2 += - 0.5 * self.likelihood.precision[:,None,None]/sf2 * self.D * self.C[None,:,:] # dC
+                self.dL_dpsi2 += - 0.5 * self.likelihood.precision[:,None,None]* self.E[None,:,:] # dD
+            else:
+                self.dL_dpsi1 += mdot(self.Kmmi,self.psi1*self.likelihood.precision.flatten().reshape(1,self.N)) #dB
+                self.dL_dpsi1 += -mdot(self.C,self.psi1*self.likelihood.precision.flatten().reshape(1,self.N)/sf2) #dC
+                self.dL_dpsi1 += -mdot(self.E,self.psi1*self.likelihood.precision.flatten().reshape(1,self.N)) #dD
+                self.dL_dpsi2 = None
+
         else:
             self.dL_dpsi2 = 0.5 * self.likelihood.precision * self.D * self.Kmmi # dB

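The recurring .flatten() calls in these hunks guard against a NumPy broadcasting trap created by EP now storing precision as an (N,1) column (see the first hunks above): multiplying an (N,1) array by an (N,)-shaped one silently broadcasts to an (N,N) matrix. A two-line illustration:

    import numpy as np

    N = 4
    precision = np.ones((N, 1))                      # column vector, as EP now stores it
    print((precision * np.ones(N)).shape)            # (4, 4) -- silent outer-product broadcast
    print((precision.flatten() * np.ones(N)).shape)  # (4,)   -- intended elementwise product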
@@ -166,14 +170,28 @@ class sparse_GP(GP):
     def _get_param_names(self):
         return sum([['iip_%i_%i'%(i,j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])],[]) + GP._get_param_names(self)

+    def update_likelihood_approximation(self):
+        """
+        Approximates a non-gaussian likelihood using Expectation Propagation
+
+        For a Gaussian (or direct: TODO) likelihood, no iteration is required:
+        this function does nothing
+        """
+        if self.has_uncertain_inputs:
+            raise NotImplementedError, "EP approximation not implemented for uncertain inputs"
+        else:
+            self.likelihood.fit_DTC(self.Kmm,self.psi1)
+            self._set_params(self._get_params()) # update the GP
+
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         sf2 = self.scale_factor**2
         if self.likelihood.is_heteroscedastic:
             A = -0.5*self.N*self.D*np.log(2.*np.pi) +0.5*np.sum(np.log(self.likelihood.precision)) -0.5*np.sum(self.V*self.likelihood.Y)
             B = -0.5*self.D*(np.sum(self.likelihood.precision.flatten()*self.psi0) - np.trace(self.A)*sf2)
         else:
             A = -0.5*self.N*self.D*(np.log(2.*np.pi) - np.log(self.likelihood.precision)) -0.5*self.likelihood.precision*self.likelihood.trYYT
             B = -0.5*self.D*(np.sum(self.likelihood.precision*self.psi0) - np.trace(self.A)*sf2)
         C = -0.5*self.D * (self.B_logdet + self.M*np.log(sf2))
         D = +0.5*np.sum(self.psi1VVpsi1 * self.C)
         return A+B+C+D

@@ -157,13 +157,29 @@ class GradientTests(unittest.TestCase):
     def test_GP_EP_probit(self):
         N = 20
         X = np.hstack([np.random.normal(5,2,N/2),np.random.normal(10,2,N/2)])[:,None]
-        Y = np.hstack([np.ones(N/2),np.repeat(-1,N/2)])[:,None]
+        Y = np.hstack([np.ones(N/2),np.zeros(N/2)])[:,None]
         kernel = GPy.kern.rbf(1)
         distribution = GPy.likelihoods.likelihood_functions.probit()
         likelihood = GPy.likelihoods.EP(Y, distribution)
         m = GPy.models.GP(X, likelihood, kernel)
         m.ensure_default_constraints()
-        self.assertTrue(m.EPEM)
+        m.update_likelihood_approximation()
+        self.assertTrue(m.checkgrad())
+        #self.assertTrue(m.EPEM)

+    def test_sparse_EP_DTC_probit(self):
+        N = 20
+        X = np.hstack([np.random.normal(5,2,N/2),np.random.normal(10,2,N/2)])[:,None]
+        Y = np.hstack([np.ones(N/2),np.zeros(N/2)])[:,None]
+        Z = np.linspace(0,15,4)[:,None]
+        kernel = GPy.kern.rbf(1)
+        distribution = GPy.likelihoods.likelihood_functions.probit()
+        likelihood = GPy.likelihoods.EP(Y, distribution)
+        m = GPy.models.sparse_GP(X, likelihood, kernel,Z)
+        m.ensure_default_constraints()
+        m.update_likelihood_approximation()
+        self.assertTrue(m.checkgrad())
+
     @unittest.skip("FITC will be broken for a while")
     def test_generalized_FITC(self):
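Taken together, the new test above doubles as the usage pattern this commit enables: sparse GP classification with EP via fit_DTC. As a standalone script (Python 2, as the raise syntax in the diff indicates), it would look roughly like this; a sketch assuming the GPy API exactly as exercised in the tests:

    import numpy as np
    import GPy

    # Two-class 1D toy problem, mirroring test_sparse_EP_DTC_probit
    N = 20
    X = np.hstack([np.random.normal(5, 2, N/2), np.random.normal(10, 2, N/2)])[:, None]
    Y = np.hstack([np.ones(N/2), np.zeros(N/2)])[:, None]
    Z = np.linspace(0, 15, 4)[:, None]               # inducing inputs

    kernel = GPy.kern.rbf(1)
    distribution = GPy.likelihoods.likelihood_functions.probit()
    likelihood = GPy.likelihoods.EP(Y, distribution)

    m = GPy.models.sparse_GP(X, likelihood, kernel, Z)
    m.ensure_default_constraints()
    m.update_likelihood_approximation()              # runs EP (fit_DTC) and refreshes the GP
    print(m.log_likelihood())                        # lower bound on the log marginal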