diff --git a/GPy/examples/poisson.py b/GPy/examples/poisson.py index e15f310d..934637f1 100644 --- a/GPy/examples/poisson.py +++ b/GPy/examples/poisson.py @@ -43,6 +43,6 @@ print m.checkgrad() # Optimize and plot m.optimize() #m.em(plot_all=False) # EM algorithm -m.plot() +m.plot(samples=4) print(m) diff --git a/GPy/examples/sparse_ep_fix.py b/GPy/examples/sparse_ep_fix.py index f2c25898..ff90f2bb 100644 --- a/GPy/examples/sparse_ep_fix.py +++ b/GPy/examples/sparse_ep_fix.py @@ -14,6 +14,7 @@ pb.ion() N = 500 M = 5 +pb.close('all') ###################################### ## 1 dimensional example @@ -42,6 +43,7 @@ print m.checkgrad() #check gradient FIXME unit test please # optimize and plot #m.optimize('tnc', messages = 1) +m.EM() m.plot(samples=3,full_cov=False) # print(m) diff --git a/GPy/inference/likelihoods.py b/GPy/inference/likelihoods.py index b170dc3d..acf1aa2d 100644 --- a/GPy/inference/likelihoods.py +++ b/GPy/inference/likelihoods.py @@ -83,12 +83,20 @@ class probit(likelihood): var = var.flatten() return stats.norm.cdf(mu/np.sqrt(1+var)) + def predictive_var(self,mu,var): + p=self.predictive_mean(mu,var) + return p*(1-p) + def _log_likelihood_gradients(): raise NotImplementedError - def plot(self,X,phi,X_obs,Z=None): + def plot(self,X,mu,var,phi,X_obs,Z=None,samples=0): assert X_obs.shape[1] == 1, 'Number of dimensions must be 1' - gpplot(X,phi,np.zeros(X.shape[0])) + phi_var = self.predictive_var(mu,var) + gpplot(X,phi,phi_var) + if samples: + phi_samples = np.vstack([np.random.binomial(1,phi.flatten()) for s in range(samples)]) + pb.plot(X,phi_samples.T,'x', alpha = 0.4, c='#3465a4' ) pb.plot(X_obs,(self.Y+1)/2,'kx',mew=1.5) if Z is not None: pb.plot(Z,Z*0+.5,'r|',mew=1.5,markersize=12) @@ -164,16 +172,22 @@ class poisson(likelihood): sigma2_hat = m2 - mu_hat**2 # Second central moment return float(Z_hat), float(mu_hat), float(sigma2_hat) - def predictive_mean(self,mu,variance): + def predictive_mean(self,mu,var): return np.exp(mu*self.scale + 
self.location) + def predictive_var(self,mu,var): + return self.predictive_mean(mu,var) + def _log_likelihood_gradients(): raise NotImplementedError - def plot(self,X,phi,X_obs,Z=None): + def plot(self,X,mu,var,phi,X_obs,Z=None,samples=0): assert X_obs.shape[1] == 1, 'Number of dimensions must be 1' - gpplot(X,phi,np.zeros(X.shape[0])) + gpplot(X,phi,phi.flatten()) pb.plot(X_obs,self.Y,'kx',mew=1.5) + if samples: + phi_samples = np.vstack([np.random.poisson(phi.flatten(),phi.size) for s in range(samples)]) + pb.plot(X,phi_samples.T, alpha = 0.4, c='#3465a4', linewidth = 0.8) if Z is not None: pb.plot(Z,Z*0+pb.ylim()[0],'k|',mew=1.5,markersize=12) diff --git a/GPy/models/GP.py b/GPy/models/GP.py index 482143d6..8222fd6a 100644 --- a/GPy/models/GP.py +++ b/GPy/models/GP.py @@ -285,7 +285,7 @@ class GP(model): if self.EP: pb.subplot(212) - self.likelihood.plot(Xnew,phi,self.X) + self.likelihood.plot(Xnew,m,v,phi,self.X,samples=samples) pb.xlim(xmin,xmax) elif self.X.shape[1]==2: diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py index 8b1b6fb9..ba07254f 100644 --- a/GPy/models/sparse_GP.py +++ b/GPy/models/sparse_GP.py @@ -151,7 +151,6 @@ class sparse_GP(GP): else: self.ep_approx = Full(self.X,self.likelihood,self.kernel,inducing=None,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta]) self.beta, self.Y, self.Z_ep = self.ep_approx.fit_EP() - print "Aqui toy" self.trbetaYYT = np.sum(np.square(self.Y)*self.beta) self._computations()