From 5b4abf4c34045e7e355f34bcca86cacaa8642d05 Mon Sep 17 00:00:00 2001
From: Alan Saul
Date: Mon, 19 Oct 2015 19:29:57 +0100
Subject: [PATCH] Fixing bernoulli likelihood for Laplace, fixing Z_EP for EP,
 and starting work on quadrature limits

---
 GPy/examples/regression.py       |  2 +-
 .../exact_gaussian_inference.py  | 10 +++-
 .../expectation_propagation.py   | 50 ++++++++++++-------
 GPy/likelihoods/bernoulli.py     | 10 ++--
 GPy/likelihoods/gaussian.py      |  2 +-
 GPy/likelihoods/likelihood.py    | 21 +++++---
 GPy/likelihoods/poisson.py       |  5 +-
 GPy/testing/likelihood_tests.py  |  9 ++--
 8 files changed, 70 insertions(+), 39 deletions(-)

diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py
index 1ce2f6f4..11734564 100644
--- a/GPy/examples/regression.py
+++ b/GPy/examples/regression.py
@@ -275,7 +275,7 @@ def toy_rbf_1d_50(optimize=True, plot=True):
 def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
     """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
     optimizer='scg'
-    x_len = 30
+    x_len = 100
     X = np.linspace(0, 10, x_len)[:, None]
     f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.RBF(1).K(X))
     Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:,None]

diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
index 2d8fb691..24d374da 100644
--- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py
+++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
@@ -22,7 +22,7 @@ class ExactGaussianInference(LatentFunctionInference):
     def __init__(self):
         pass#self._YYTfactor_cache = caching.cache()

-    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, precision=None):
+    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, precision=None, Z=None):
         """
         Returns a Posterior class containing essential quantities of the posterior
         """
@@ -49,9 +49,15 @@ class ExactGaussianInference(LatentFunctionInference):

         log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor))

+        if Z is not None:
+            # This is a correction term for the log marginal likelihood
+            # In EP this is log Z_tilde, which is the difference between the
+            # Gaussian marginal and Z_EP
+            log_marginal += Z
+
         dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)

-        dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata)
+        dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK), Y_metadata)

         return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}

diff --git a/GPy/inference/latent_function_inference/expectation_propagation.py b/GPy/inference/latent_function_inference/expectation_propagation.py
index d293d4de..666fda79 100644
--- a/GPy/inference/latent_function_inference/expectation_propagation.py
+++ b/GPy/inference/latent_function_inference/expectation_propagation.py
@@ -39,26 +39,25 @@ class EPBase(object):
 class EP(EPBase, ExactGaussianInference):
     def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, precision=None, K=None):
         num_data, output_dim = Y.shape
-        assert output_dim ==1, "ep in 1D only (for now!)"
+        assert output_dim == 1, "ep in 1D only (for now!)"

         if K is None:
             K = kern.K(X)

         if self._ep_approximation is None:
             #if we don't yet have the results of running EP, run EP and store the computed factors in self._ep_approximation
-            mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
+            mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
         else:
             #if we've already run EP, just use the existing approximation stored in self._ep_approximation
-            mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation
+            mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation

-        return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, precision=1./tau_tilde, K=K)
+        return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, precision=1./tau_tilde, K=K, Z=np.log(Z_tilde).sum())

     def expectation_propagation(self, K, Y, likelihood, Y_metadata):

         num_data, data_dim = Y.shape
         assert data_dim == 1, "This EP method only works for 1D outputs"

-
         #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
         mu = np.zeros(num_data)
         Sigma = K.copy()

@@ -69,6 +68,9 @@ class EP(EPBase, ExactGaussianInference):
         mu_hat = np.empty(num_data,dtype=np.float64)
         sigma2_hat = np.empty(num_data,dtype=np.float64)

+        tau_cav = np.empty(num_data,dtype=np.float64)
+        v_cav = np.empty(num_data,dtype=np.float64)
+
         #initial values - Gaussian factors
         if self.old_mutilde is None:
             tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))

@@ -80,15 +82,17 @@ class EP(EPBase, ExactGaussianInference):
         #Approximation
         tau_diff = self.epsilon + 1.
         v_diff = self.epsilon + 1.
+        tau_tilde_old = np.nan
+        v_tilde_old = np.nan
         iterations = 0
         while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
             update_order = np.random.permutation(num_data)
             for i in update_order:
                 #Cavity distribution parameters
-                tau_cav = 1./Sigma[i,i] - self.eta*tau_tilde[i]
-                v_cav = mu[i]/Sigma[i,i] - self.eta*v_tilde[i]
+                tau_cav[i] = 1./Sigma[i,i] - self.eta*tau_tilde[i]
+                v_cav[i] = mu[i]/Sigma[i,i] - self.eta*v_tilde[i]
                 #Marginal moments
-                Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav, v_cav)#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
+                Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav[i], v_cav[i])#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
                 #Site parameters update
                 delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma[i,i])
                 delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma[i,i])

@@ -108,7 +112,7 @@ class EP(EPBase, ExactGaussianInference):
             mu = np.dot(Sigma,v_tilde)

             #monitor convergence
-            if iterations>0:
+            if iterations > 0:
                 tau_diff = np.mean(np.square(tau_tilde-tau_tilde_old))
                 v_diff = np.mean(np.square(v_tilde-v_tilde_old))
             tau_tilde_old = tau_tilde.copy()

@@ -117,7 +121,11 @@ class EP(EPBase, ExactGaussianInference):
             iterations += 1

         mu_tilde = v_tilde/tau_tilde
-        return mu, Sigma, mu_tilde, tau_tilde, Z_hat
+        mu_cav = v_cav/tau_cav
+        sigma2_sigma2tilde = 1./tau_cav + 1./tau_tilde
+        Z_tilde = np.exp(np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(sigma2_sigma2tilde)
+                         + 0.5*((mu_cav - mu_tilde)**2) / (sigma2_sigma2tilde))
+        return mu, Sigma, mu_tilde, tau_tilde, Z_tilde

 class EPDTC(EPBase, VarDTC):
     def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):

         Kmn = psi1.T

         if self._ep_approximation is None:
-            mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
+            mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
         else:
-            mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation
+            mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation

         return super(EPDTC, self).inference(kern, X, Z, likelihood, mu_tilde,
                                             mean_function=mean_function,
                                             Y_metadata=Y_metadata,
                                             precision=tau_tilde,
                                             Lm=Lm, dL_dKmm=dL_dKmm,
-                                            psi0=psi0, psi1=psi1, psi2=psi2)
+                                            psi0=psi0, psi1=psi1, psi2=psi2, Z=Z_tilde)

     def expectation_propagation(self, Kmm, Kmn, Y, likelihood, Y_metadata):
         num_data, output_dim = Y.shape

@@ -167,6 +175,9 @@ class EPDTC(EPBase, VarDTC):
         mu_hat = np.zeros(num_data,dtype=np.float64)
         sigma2_hat = np.zeros(num_data,dtype=np.float64)

+        tau_cav = np.empty(num_data,dtype=np.float64)
+        v_cav = np.empty(num_data,dtype=np.float64)
+
         #initial values - Gaussian factors
         if self.old_mutilde is None:
             tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))

@@ -186,10 +197,10 @@ class EPDTC(EPBase, VarDTC):
         while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
             for i in update_order:
                 #Cavity distribution parameters
-                tau_cav = 1./Sigma_diag[i] - self.eta*tau_tilde[i]
-                v_cav = mu[i]/Sigma_diag[i] - self.eta*v_tilde[i]
+                tau_cav[i] = 1./Sigma_diag[i] - self.eta*tau_tilde[i]
+                v_cav[i] = mu[i]/Sigma_diag[i] - self.eta*v_tilde[i]
                 #Marginal moments
-                Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav, v_cav)#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
+                Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav[i], v_cav[i])#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
                 #Site parameters update
                 delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])
                 delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma_diag[i])

@@ -233,5 +244,8 @@ class EPDTC(EPBase, VarDTC):
         iterations += 1

     mu_tilde = v_tilde/tau_tilde
-    return mu, Sigma, ObsAr(mu_tilde[:,None]), tau_tilde, Z_hat
-
+    mu_cav = v_cav/tau_cav
+    sigma2_sigma2tilde = 1./tau_cav + 1./tau_tilde
+    Z_tilde = np.exp(np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(sigma2_sigma2tilde)
+                     + 0.5*((mu_cav - mu_tilde)**2) / (sigma2_sigma2tilde))
+    return mu, Sigma, ObsAr(mu_tilde[:,None]), tau_tilde, Z_tilde

diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py
index 856de40f..7f1e55a8 100644
--- a/GPy/likelihoods/bernoulli.py
+++ b/GPy/likelihoods/bernoulli.py
@@ -140,7 +140,7 @@ class Bernoulli(Likelihood):
         Each y_i must be in {0, 1}
         """
         #objective = (inv_link_f**y) * ((1.-inv_link_f)**(1.-y))
-        return np.where(y, inv_link_f, 1.-inv_link_f)
+        return np.where(y==1, inv_link_f, 1.-inv_link_f)

     def logpdf_link(self, inv_link_f, y, Y_metadata=None):
         """
@@ -179,7 +179,7 @@ class Bernoulli(Likelihood):
         #grad = (y/inv_link_f) - (1.-y)/(1-inv_link_f)
         #grad = np.where(y, 1./inv_link_f, -1./(1-inv_link_f))
         ff = np.clip(inv_link_f, 1e-9, 1-1e-9)
-        denom = np.where(y, ff, -(1-ff))
+        denom = np.where(y==1, ff, -(1-ff))
         return 1./denom

     def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
         """
@@ -205,7 +205,7 @@ class Bernoulli(Likelihood):
         """
         #d2logpdf_dlink2 = -y/(inv_link_f**2) - (1-y)/((1-inv_link_f)**2)
         #d2logpdf_dlink2 = np.where(y, -1./np.square(inv_link_f), -1./np.square(1.-inv_link_f))
-        arg = np.where(y, inv_link_f, 1.-inv_link_f)
+        arg = np.where(y==1, inv_link_f, 1.-inv_link_f)
         ret = -1./np.square(np.clip(arg, 1e-9, 1e9))
         if np.any(np.isinf(ret)):
             stop

@@ -230,7 +230,7 @@ class Bernoulli(Likelihood):
         #d3logpdf_dlink3 = 2*(y/(inv_link_f**3) - (1-y)/((1-inv_link_f)**3))
         state = np.seterr(divide='ignore')
         # TODO check y \in {0, 1} or {-1, 1}
-        d3logpdf_dlink3 = np.where(y, 2./(inv_link_f**3), -2./((1.-inv_link_f)**3))
+        d3logpdf_dlink3 = np.where(y==1, 2./(inv_link_f**3), -2./((1.-inv_link_f)**3))
         np.seterr(**state)
         return d3logpdf_dlink3

@@ -243,8 +243,6 @@ class Bernoulli(Likelihood):
         p = self.predictive_mean(mu, var)
         return [np.asarray(p>(q/100.), dtype=np.int32) for q in quantiles]

-
-
     def samples(self, gp, Y_metadata=None):
         """
         Returns a set of samples of observations based on a given value of the latent variable.

diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index e1299f73..1c68bac7 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -67,7 +67,7 @@ class Gaussian(Likelihood):
         """
         return Y

-    def _moments_match_ep(self, data_i, tau_i, v_i):
+    def moments_match_ep(self, data_i, tau_i, v_i):
         """
         Moments match of the marginal approximation in EP algorithm

diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py
index 74c4c6fd..5ae8c79d 100644
--- a/GPy/likelihoods/likelihood.py
+++ b/GPy/likelihoods/likelihood.py
@@ -49,8 +49,8 @@ class Likelihood(Parameterized):
         """
         return Y.shape[1]

-    def _gradients(self,partial):
-        return np.zeros(0)
+    def exact_inference_gradients(self, dL_dKdiag,Y_metadata=None):
+        return np.zeros(self.size)

     def update_gradients(self, partial):
         if self.size > 0:

@@ -176,8 +176,10 @@ class Likelihood(Parameterized):
         log_p_ystar = np.array(log_p_ystar).reshape(*y_test.shape)
         return log_p_ystar

+    def quad_limits(self):
+        return -np.inf, np.inf

-    def _moments_match_ep(self,obs,tau,v):
+    def moments_match_ep(self,obs,tau,v):
         """
         Calculation of moments using quadrature

@@ -188,20 +190,27 @@ class Likelihood(Parameterized):
         #Compute first integral for zeroth moment.
         #NOTE constant np.sqrt(2*pi/tau) added at the end of the function
         mu = v/tau
+        sigma2 = 1./tau
+        #Let's do this for now, based on the same idea as Gaussian quadrature,
+        # i.e. anything multiplied by something close to zero is zero.
+        f_min = mu - 8*np.sqrt(sigma2)
+        f_max = mu + 8*np.sqrt(sigma2)
+
+        # f_min, f_max = self.quad_limits()
         def int_1(f):
             return self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))
-        z_scaled, accuracy = quad(int_1, -np.inf, np.inf)
+        z_scaled, accuracy = quad(int_1, f_min, f_max)

         #Compute second integral for first moment
         def int_2(f):
             return f*self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))
-        mean, accuracy = quad(int_2, -np.inf, np.inf)
+        mean, accuracy = quad(int_2, f_min, f_max)
         mean /= z_scaled

         #Compute integral for variance
         def int_3(f):
             return (f**2)*self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))
-        Ef2, accuracy = quad(int_3, -np.inf, np.inf)
+        Ef2, accuracy = quad(int_3, f_min, f_max)
         Ef2 /= z_scaled
         variance = Ef2 - mean**2

diff --git a/GPy/likelihoods/poisson.py b/GPy/likelihoods/poisson.py
index cfe279bb..d3eef7a4 100644
--- a/GPy/likelihoods/poisson.py
+++ b/GPy/likelihoods/poisson.py
@@ -28,7 +28,7 @@ class Poisson(Likelihood):
         """
         the expected value of y given a value of f
         """
-        return self.gp_link.transf(gp)
+        return self.gp_link.transf(f)

     def pdf_link(self, link_f, y, Y_metadata=None):
         """
@@ -46,7 +46,8 @@ class Poisson(Likelihood):
         :rtype: float
         """
         assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
-        return np.prod(stats.poisson.pmf(y,link_f))
+        return np.exp(self.logpdf_link(link_f, y, Y_metadata))
+        # return np.prod(stats.poisson.pmf(y,link_f))

     def logpdf_link(self, link_f, y, Y_metadata=None):
         """

diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py
index de347192..d3750c0b 100644
--- a/GPy/testing/likelihood_tests.py
+++ b/GPy/testing/likelihood_tests.py
@@ -113,6 +113,7 @@ class TestNoiseModels(object):
         self.Y = (np.sin(self.X[:, 0]*2*np.pi) + noise)[:, None]
         self.f = np.random.rand(self.N, 1)
         self.binary_Y = np.asarray(np.random.rand(self.N) > 0.5, dtype=np.int)[:, None]
+        self.binary_Y[self.binary_Y == 0.0] = -1.0
         self.positive_Y = np.exp(self.Y.copy())
         tmp = np.round(self.X[:, 0]*3-3)[:, None] + np.random.randint(0,3, self.X.shape[0])[:, None]
         self.integer_Y = np.where(tmp > 0, tmp, 0)
@@ -561,12 +562,14 @@ class TestNoiseModels(object):
         print("\n{}".format(inspect.stack()[0][3]))
         np.random.seed(111)
         #Normalize
-        Y = Y/Y.max()
-
+        # Y = Y/Y.max()
+        white_var = 1e-5
         kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
         laplace_likelihood = GPy.inference.latent_function_inference.Laplace()
         m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=laplace_likelihood)
+        m['.*white'].constrain_fixed(white_var)
+
         m.randomize()

         #Set constraints
@@ -591,7 +594,7 @@ class TestNoiseModels(object):
         print("\n{}".format(inspect.stack()[0][3]))
         #Normalize
         Y = Y/Y.max()
-        white_var = 1e-6
+        white_var = 1e-5
         kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
         ep_inf = GPy.inference.latent_function_inference.EP()
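
A note on the Z_tilde bookkeeping in expectation_propagation.py: with mu_cav = v_cav/tau_cav and sigma2_sigma2tilde = 1./tau_cav + 1./tau_tilde, the per-site quantity computed after each EP loop is equivalent to

    log Z_tilde[i] = log Z_hat[i] + 0.5*log(2*pi) + 0.5*log(1./tau_cav[i] + 1./tau_tilde[i])
                     + 0.5*(mu_cav[i] - mu_tilde[i])**2 / (1./tau_cav[i] + 1./tau_tilde[i])

i.e. log Z_hat[i] minus the log density of the cavity mean under a Gaussian with mean mu_tilde[i] and variance 1./tau_cav[i] + 1./tau_tilde[i]. EP.inference sums these logs (np.log(Z_tilde).sum()) and hands the total to ExactGaussianInference.inference through the new Z keyword, where it is added to the Gaussian log marginal; this is the standard EP marginal-likelihood correction (cf. Rasmussen and Williams, ch. 3).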
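
The np.where(y, ...) -> np.where(y==1, ...) changes in bernoulli.py work together with the new {-1, +1} label encoding in likelihood_tests.py: np.where treats any nonzero value as true, so once the negative class is coded as -1 rather than 0, the old form silently routed negative-class points through the positive-class branch. A minimal sketch of the difference (the numbers are made up for illustration):

    import numpy as np

    y = np.array([1, -1, 1])         # labels in {-1, +1}, as set up in likelihood_tests.py
    p = np.array([0.9, 0.2, 0.6])    # inv_link_f, i.e. p(y=1|f)

    # Old form: -1 is truthy, so the middle point wrongly takes the positive branch
    print(np.where(y, p, 1. - p))       # [ 0.9  0.2  0.6]
    # Patched form: only y == 1 selects the positive-class probability
    print(np.where(y == 1, p, 1. - p))  # [ 0.9  0.8  0.6]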
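
The quadrature truncation in Likelihood.moments_match_ep can also be read in isolation: each integrand contains the cavity factor exp(-0.5*tau*(mu-f)**2), which is negligible more than a few standard deviations from mu = v/tau, so integrating over mu +/- 8*sqrt(1/tau) instead of (-inf, inf) loses essentially nothing while sparing scipy's quad the infinite-interval transformation. A minimal standalone sketch under that assumption; the name moments_via_quadrature and the bare pdf(f, y) callable are illustrative, not GPy API:

    import numpy as np
    from scipy.integrate import quad

    def moments_via_quadrature(pdf, y, tau, v, n_sigma=8.):
        # pdf(f, y) is the likelihood p(y|f); (tau, v) are the natural
        # parameters of the cavity distribution, as in moments_match_ep.
        mu = v / tau
        sigma = np.sqrt(1. / tau)
        # Truncate where the cavity factor has decayed to numerical noise.
        f_min, f_max = mu - n_sigma * sigma, mu + n_sigma * sigma
        cavity = lambda f: np.exp(-0.5 * tau * np.square(mu - f))
        z_scaled, _ = quad(lambda f: pdf(f, y) * cavity(f), f_min, f_max)
        mean, _ = quad(lambda f: f * pdf(f, y) * cavity(f), f_min, f_max)
        Ef2, _ = quad(lambda f: f**2 * pdf(f, y) * cavity(f), f_min, f_max)
        mean /= z_scaled
        variance = Ef2 / z_scaled - mean**2
        # As in the patched code, the constant np.sqrt(2*pi/tau) for the
        # zeroth moment is left to the caller.
        return z_scaled, mean, variance

As the commented-out self.quad_limits() call in the patch suggests, the eventual plan appears to be letting each likelihood supply its own integration limits (e.g. a positivity constraint) instead of a fixed +/- 8 sigma window.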