Fix EP and merge it with GP_regression

This commit is contained in:
Ricardo Andrade 2013-01-25 18:14:28 +00:00
parent b6ffb57263
commit 6a2e0a1fe5
7 changed files with 403 additions and 93 deletions

View file

@ -60,7 +60,7 @@ class Full(EP):
def fit_EP(self):
"""
The expectation-propagation algorithm.
For nomenclature see Rasmussen & Williams 2006 (pag. 52-60)
For nomenclature see Rasmussen & Williams 2006.
"""
#Prior distribution parameters: p(f|X) = N(f|0,K)
#self.K = self.kernel.K(self.X,self.X)
@ -84,8 +84,6 @@ class Full(EP):
phi = np.empty(self.N,dtype=float)
mu_hat = np.empty(self.N,dtype=float)
sigma2_hat = np.empty(self.N,dtype=float)
self.mu_hat = mu_hat #TODO erase me
self.sigma2_hat = sigma2_hat #TODO erase me
#Approximation
epsilon_np1 = self.epsilon + 1.
@ -95,21 +93,16 @@ class Full(EP):
self.np2 = [self.v_tilde.copy()]
while epsilon_np1 > self.epsilon or epsilon_np2 > self.epsilon:
update_order = np.arange(self.N)
#random.shuffle(update_order) #TODO uncomment
random.shuffle(update_order)
for i in update_order:
#Cavity distribution parameters
self.tau_[i] = 1./self.Sigma[i,i] - self.eta*self.tau_tilde[i]
self.v_[i] = self.mu[i]/self.Sigma[i,i] - self.eta*self.v_tilde[i]
#Marginal moments
self.Z_hat[i], mu_hat[i], sigma2_hat[i] = self.likelihood.moments_match(i,self.tau_[i],self.v_[i])
self.mu_hat[i] = mu_hat[i] #TODO erase me
self.sigma2_hat[i] = sigma2_hat[i] #TODO erase me
#if i == 3:
# a = b
#Site parameters update
Delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./self.Sigma[i,i])
Delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - self.mu[i]/self.Sigma[i,i])
print Delta_tau
self.tau_tilde[i] = self.tau_tilde[i] + Delta_tau
self.v_tilde[i] = self.v_tilde[i] + Delta_v
#Posterior distribution parameters update
@ -128,6 +121,7 @@ class Full(EP):
epsilon_np2 = sum((self.v_tilde-self.np2[-1])**2)/self.N
self.np1.append(self.tau_tilde.copy())
self.np2.append(self.v_tilde.copy())
return self.tau_tilde[:,None], self.v_tilde[:,None], self.Z_hat[:,None], self.tau_[:,None], self.v_[:,None]
class DTC(EP):
def fit_EP(self):

View file

@ -19,7 +19,7 @@ class likelihood:
self.Y = Y
self.N = self.Y.shape[0]
def plot1Da(self,X_new,Mean_new,Var_new,X_u,Mean_u,Var_u):
def plot1Da(self,X,mean,var,Z=None,mean_Z=None,var_Z=None):
"""
Plot the predictive distribution of the GP model for 1-dimensional inputs
@ -30,10 +30,18 @@ class likelihood:
:param Mean_u: mean values at X_u
:param Var_new: variance values at X_u
"""
assert X_new.shape[1] == 1, 'Number of dimensions must be 1'
gpplot(X_new,Mean_new,Var_new)
pb.errorbar(X_u.flatten(),Mean_u.flatten(),2*np.sqrt(Var_u.flatten()),fmt='r+')
pb.plot(X_u,Mean_u,'ro')
assert X.shape[1] == 1, 'Number of dimensions must be 1'
gpplot(X,mean,var.flatten())
pb.errorbar(Z.flatten(),mean_Z.flatten(),2*np.sqrt(var_Z.flatten()),fmt='r+')
pb.plot(Z,mean_Z,'ro')
def plot1Db(self,X_obs,X,phi,Z=None):
    """
    Plot a 1-dimensional classification fit: the curve phi over X, the
    observed targets rescaled onto [0, 1], and (optionally) marker ticks
    at the inducing inputs Z.

    :param X_obs: observed inputs, shape (N, 1)
    :param X: inputs at which phi was evaluated
    :param phi: predicted values at X
    :param Z: optional inducing-input locations to mark — TODO confirm semantics with caller
    """
    assert X_obs.shape[1] == 1, 'Number of dimensions must be 1'
    # Zero-width band: gpplot draws just the curve phi.
    band = np.zeros(X.shape[0])
    gpplot(X, phi, band)
    # Targets are mapped via (Y + 1) / 2 — presumably labels in {-1, 1}
    # being rescaled onto {0, 1}; verify against the likelihood's Y.
    targets = (self.Y + 1) / 2
    pb.plot(X_obs, targets, 'kx', mew=1.5)
    pb.ylim(-0.2, 1.2)
    if Z is not None:
        # Vertical red ticks at mid-height mark each inducing input.
        mid = Z * 0 + .5
        pb.plot(Z, mid, 'r|', mew=1.5, markersize=12)
def plot2D(self,X,X_new,F_new,U=None):
"""
@ -88,16 +96,11 @@ class probit(likelihood):
sigma2_hat = 1./tau_i - (phi/((tau_i**2+tau_i)*Z_hat))*(z+phi/Z_hat)
return Z_hat, mu_hat, sigma2_hat
def plot1Db(self,X,X_new,F_new,U=None):
assert X.shape[1] == 1, 'Number of dimensions must be 1'
gpplot(X_new,F_new,np.zeros(X_new.shape[0]))
pb.plot(X,(self.Y+1)/2,'kx',mew=1.5)
pb.ylim(-0.2,1.2)
if U is not None:
pb.plot(U,U*0+.5,'r|',mew=1.5,markersize=12)
def predictive_mean(self,mu,variance):
return stats.norm.cdf(mu/np.sqrt(1+variance))
def predictive_mean(self,mu,var):
    """
    Probit predictive mean: Phi(mu / sqrt(1 + var)), where Phi is the
    standard normal CDF.

    :param mu: posterior mean values (any array shape; flattened internally)
    :param var: posterior variances, same number of elements as mu
    :returns: flat array of predictive means
    """
    # Flatten so the computation is elementwise over matching 1-D arrays.
    m = mu.flatten()
    v = var.flatten()
    z = m / np.sqrt(1. + v)
    return stats.norm.cdf(z)
def _log_likelihood_gradients():
raise NotImplementedError