mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-04 01:02:39 +02:00
Fixing Bernoulli likelihood for Laplace, fixing Z_EP for EP, and starting work on quadrature limits
This commit is contained in:
parent
6b6938bd11
commit
5b4abf4c34
8 changed files with 70 additions and 39 deletions
|
|
@ -22,7 +22,7 @@ class ExactGaussianInference(LatentFunctionInference):
|
|||
def __init__(self):
|
||||
pass#self._YYTfactor_cache = caching.cache()
|
||||
|
||||
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, precision=None):
|
||||
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, precision=None, Z=None):
|
||||
"""
|
||||
Returns a Posterior class containing essential quantities of the posterior
|
||||
"""
|
||||
|
|
@ -49,9 +49,15 @@ class ExactGaussianInference(LatentFunctionInference):
|
|||
|
||||
log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor))
|
||||
|
||||
if Z is not None:
|
||||
# This is a correction term for the log marginal likelihood
|
||||
# In EP this is log Z_tilde, which is the difference between the
|
||||
# Gaussian marginal and Z_EP
|
||||
log_marginal += Z
|
||||
|
||||
dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)
|
||||
|
||||
dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata)
|
||||
dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK), Y_metadata)
|
||||
|
||||
return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}
|
||||
|
||||
|
|
|
|||
|
|
@ -39,26 +39,25 @@ class EPBase(object):
|
|||
class EP(EPBase, ExactGaussianInference):
|
||||
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, precision=None, K=None):
|
||||
num_data, output_dim = Y.shape
|
||||
assert output_dim ==1, "ep in 1D only (for now!)"
|
||||
assert output_dim == 1, "ep in 1D only (for now!)"
|
||||
|
||||
if K is None:
|
||||
K = kern.K(X)
|
||||
|
||||
if self._ep_approximation is None:
|
||||
#if we don't yet have the results of running EP, run EP and store the computed factors in self._ep_approximation
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
|
||||
else:
|
||||
#if we've already run EP, just use the existing approximation stored in self._ep_approximation
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation
|
||||
|
||||
return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, precision=1./tau_tilde, K=K)
|
||||
return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, precision=1./tau_tilde, K=K, Z=np.log(Z_tilde).sum())
|
||||
|
||||
def expectation_propagation(self, K, Y, likelihood, Y_metadata):
|
||||
|
||||
num_data, data_dim = Y.shape
|
||||
assert data_dim == 1, "This EP methods only works for 1D outputs"
|
||||
|
||||
|
||||
#Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
|
||||
mu = np.zeros(num_data)
|
||||
Sigma = K.copy()
|
||||
|
|
@ -69,6 +68,9 @@ class EP(EPBase, ExactGaussianInference):
|
|||
mu_hat = np.empty(num_data,dtype=np.float64)
|
||||
sigma2_hat = np.empty(num_data,dtype=np.float64)
|
||||
|
||||
tau_cav = np.empty(num_data,dtype=np.float64)
|
||||
v_cav = np.empty(num_data,dtype=np.float64)
|
||||
|
||||
#initial values - Gaussian factors
|
||||
if self.old_mutilde is None:
|
||||
tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))
|
||||
|
|
@ -80,15 +82,17 @@ class EP(EPBase, ExactGaussianInference):
|
|||
#Approximation
|
||||
tau_diff = self.epsilon + 1.
|
||||
v_diff = self.epsilon + 1.
|
||||
tau_tilde_old = np.nan
|
||||
v_tilde_old = np.nan
|
||||
iterations = 0
|
||||
while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
|
||||
update_order = np.random.permutation(num_data)
|
||||
for i in update_order:
|
||||
#Cavity distribution parameters
|
||||
tau_cav = 1./Sigma[i,i] - self.eta*tau_tilde[i]
|
||||
v_cav = mu[i]/Sigma[i,i] - self.eta*v_tilde[i]
|
||||
tau_cav[i] = 1./Sigma[i,i] - self.eta*tau_tilde[i]
|
||||
v_cav[i] = mu[i]/Sigma[i,i] - self.eta*v_tilde[i]
|
||||
#Marginal moments
|
||||
Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav, v_cav)#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
|
||||
Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav[i], v_cav[i])#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
|
||||
#Site parameters update
|
||||
delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma[i,i])
|
||||
delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma[i,i])
|
||||
|
|
@ -108,7 +112,7 @@ class EP(EPBase, ExactGaussianInference):
|
|||
mu = np.dot(Sigma,v_tilde)
|
||||
|
||||
#monitor convergence
|
||||
if iterations>0:
|
||||
if iterations > 0:
|
||||
tau_diff = np.mean(np.square(tau_tilde-tau_tilde_old))
|
||||
v_diff = np.mean(np.square(v_tilde-v_tilde_old))
|
||||
tau_tilde_old = tau_tilde.copy()
|
||||
|
|
@ -117,7 +121,11 @@ class EP(EPBase, ExactGaussianInference):
|
|||
iterations += 1
|
||||
|
||||
mu_tilde = v_tilde/tau_tilde
|
||||
return mu, Sigma, mu_tilde, tau_tilde, Z_hat
|
||||
mu_cav = v_cav/tau_cav
|
||||
sigma2_sigma2tilde = 1./tau_cav + 1./tau_tilde
|
||||
Z_tilde = np.exp(np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(sigma2_sigma2tilde)
|
||||
+ 0.5*((mu_cav - mu_tilde)**2) / (sigma2_sigma2tilde))
|
||||
return mu, Sigma, mu_tilde, tau_tilde, Z_tilde
|
||||
|
||||
class EPDTC(EPBase, VarDTC):
|
||||
def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
|
||||
|
|
@ -133,16 +141,16 @@ class EPDTC(EPBase, VarDTC):
|
|||
Kmn = psi1.T
|
||||
|
||||
if self._ep_approximation is None:
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
|
||||
else:
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation
|
||||
mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation
|
||||
|
||||
return super(EPDTC, self).inference(kern, X, Z, likelihood, mu_tilde,
|
||||
mean_function=mean_function,
|
||||
Y_metadata=Y_metadata,
|
||||
precision=tau_tilde,
|
||||
Lm=Lm, dL_dKmm=dL_dKmm,
|
||||
psi0=psi0, psi1=psi1, psi2=psi2)
|
||||
psi0=psi0, psi1=psi1, psi2=psi2, Z=Z_tilde)
|
||||
|
||||
def expectation_propagation(self, Kmm, Kmn, Y, likelihood, Y_metadata):
|
||||
num_data, output_dim = Y.shape
|
||||
|
|
@ -167,6 +175,9 @@ class EPDTC(EPBase, VarDTC):
|
|||
mu_hat = np.zeros(num_data,dtype=np.float64)
|
||||
sigma2_hat = np.zeros(num_data,dtype=np.float64)
|
||||
|
||||
tau_cav = np.empty(num_data,dtype=np.float64)
|
||||
v_cav = np.empty(num_data,dtype=np.float64)
|
||||
|
||||
#initial values - Gaussian factors
|
||||
if self.old_mutilde is None:
|
||||
tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))
|
||||
|
|
@ -186,10 +197,10 @@ class EPDTC(EPBase, VarDTC):
|
|||
while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
|
||||
for i in update_order:
|
||||
#Cavity distribution parameters
|
||||
tau_cav = 1./Sigma_diag[i] - self.eta*tau_tilde[i]
|
||||
v_cav = mu[i]/Sigma_diag[i] - self.eta*v_tilde[i]
|
||||
tau_cav[i] = 1./Sigma_diag[i] - self.eta*tau_tilde[i]
|
||||
v_cav[i] = mu[i]/Sigma_diag[i] - self.eta*v_tilde[i]
|
||||
#Marginal moments
|
||||
Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav, v_cav)#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
|
||||
Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav[i], v_cav[i])#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
|
||||
#Site parameters update
|
||||
delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])
|
||||
delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma_diag[i])
|
||||
|
|
@ -233,5 +244,8 @@ class EPDTC(EPBase, VarDTC):
|
|||
iterations += 1
|
||||
|
||||
mu_tilde = v_tilde/tau_tilde
|
||||
return mu, Sigma, ObsAr(mu_tilde[:,None]), tau_tilde, Z_hat
|
||||
|
||||
mu_cav = v_cav/tau_cav
|
||||
sigma2_sigma2tilde = 1./tau_cav + 1./tau_tilde
|
||||
Z_tilde = np.exp(np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(sigma2_sigma2tilde)
|
||||
+ 0.5*((mu_cav - mu_tilde)**2) / (sigma2_sigma2tilde))
|
||||
return mu, Sigma, ObsAr(mu_tilde[:,None]), tau_tilde, Z_tilde
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue