EP plots samples now for the phi transformation.

Commit 29eb61d65e (parent d1a0883c12), from a mirror of https://github.com/SheffieldML/GPy.git

5 changed files with 23 additions and 8 deletions
@@ -43,6 +43,6 @@ print m.checkgrad()
 # Optimize and plot
 m.optimize()
 #m.em(plot_all=False) # EM algorithm
-m.plot()
+m.plot(samples=4)

 print(m)
@@ -14,6 +14,7 @@ pb.ion()
 N = 500
 M = 5

+pb.close('all')
 ######################################
 ## 1 dimensional example


@@ -42,6 +43,7 @@ print m.checkgrad()
 #check gradient FIXME unit test please
 # optimize and plot
 #m.optimize('tnc', messages = 1)
 m.EM()
+m.plot(samples=3,full_cov=False)
 # print(m)

@@ -83,12 +83,20 @@ class probit(likelihood):
         var = var.flatten()
         return stats.norm.cdf(mu/np.sqrt(1+var))

+    def predictive_var(self,mu,var):
+        p = self.predictive_mean(mu,var)
+        return p*(1-p)
+
     def _log_likelihood_gradients():
         raise NotImplementedError

-    def plot(self,X,phi,X_obs,Z=None):
+    def plot(self,X,mu,var,phi,X_obs,Z=None,samples=0):
         assert X_obs.shape[1] == 1, 'Number of dimensions must be 1'
-        gpplot(X,phi,np.zeros(X.shape[0]))
+        phi_var = self.predictive_var(mu,var)
+        gpplot(X,phi,phi_var)
+        if samples:
+            phi_samples = np.vstack([np.random.binomial(1,phi.flatten()) for s in range(samples)])
+            pb.plot(X,phi_samples.T,'x', alpha=0.4, c='#3465a4')
         pb.plot(X_obs,(self.Y+1)/2,'kx',mew=1.5)
         if Z is not None:
             pb.plot(Z,Z*0+.5,'r|',mew=1.5,markersize=12)
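For context (not part of the diff): `predictive_mean` marginalises the Gaussian posterior over the latent function, giving p(y=1) = Phi(mu/sqrt(1+var)), so the output is Bernoulli and the new `predictive_var` is simply p(1-p). A quick numerical sketch of the same arithmetic outside the class, with made-up posterior moments:

# Probit predictive moments, assuming a Gaussian posterior over the
# latent f with mean mu and variance var at three test inputs.
import numpy as np
from scipy import stats

mu = np.array([-2.0, 0.0, 2.0])
var = np.array([0.5, 1.0, 0.5])

p = stats.norm.cdf(mu / np.sqrt(1 + var))   # predictive_mean: p(y=1)
print(p * (1 - p))                          # predictive_var: Bernoulli p(1-p)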
@@ -164,16 +172,22 @@ class poisson(likelihood):
         sigma2_hat = m2 - mu_hat**2 # Second central moment
         return float(Z_hat), float(mu_hat), float(sigma2_hat)

-    def predictive_mean(self,mu,variance):
+    def predictive_mean(self,mu,var):
         return np.exp(mu*self.scale + self.location)

+    def predictive_var(self,mu,var):
+        return self.predictive_mean(mu,var)
+
     def _log_likelihood_gradients():
         raise NotImplementedError

-    def plot(self,X,phi,X_obs,Z=None):
+    def plot(self,X,mu,var,phi,X_obs,Z=None,samples=0):
         assert X_obs.shape[1] == 1, 'Number of dimensions must be 1'
-        gpplot(X,phi,np.zeros(X.shape[0]))
+        gpplot(X,phi,phi.flatten())
         pb.plot(X_obs,self.Y,'kx',mew=1.5)
+        if samples:
+            phi_samples = np.vstack([np.random.poisson(phi.flatten(),phi.size) for s in range(samples)])
+            pb.plot(X,phi_samples.T, alpha=0.4, c='#3465a4', linewidth=0.8)
         if Z is not None:
             pb.plot(Z,Z*0+pb.ylim()[0],'k|',mew=1.5,markersize=12)

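Again for context: a Poisson's variance equals its mean, which is why `predictive_var` just returns `predictive_mean`, and why the plot passes `phi` itself as the error band. The sample overlay draws one independent Poisson count per grid point per path; a sketch with a synthetic rate curve in place of GPy's `phi`:

# Poisson sample overlay, mirroring the plot method above.
# X and phi are synthetic stand-ins for the test grid and predicted rate.
import numpy as np
import pylab as pb

X = np.linspace(0, 5, 100)[:, None]
phi = np.exp(np.sin(2*X) + 1.0)              # stand-in predicted rate
samples = 3

phi_samples = np.vstack([np.random.poisson(phi.flatten(), phi.size)
                         for s in range(samples)])

pb.plot(X, phi, 'b-')                        # predicted rate (= variance)
pb.plot(X, phi_samples.T, alpha=0.4, c='#3465a4', linewidth=0.8)
pb.show()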
@@ -285,7 +285,7 @@ class GP(model):

         if self.EP:
             pb.subplot(212)
-            self.likelihood.plot(Xnew,phi,self.X)
+            self.likelihood.plot(Xnew,m,v,phi,self.X,samples=samples)
             pb.xlim(xmin,xmax)

         elif self.X.shape[1]==2:
@@ -151,7 +151,6 @@ class sparse_GP(GP):
         else:
             self.ep_approx = Full(self.X,self.likelihood,self.kernel,inducing=None,epsilon=self.epsilon_ep,power_ep=[self.eta,self.delta])
         self.beta, self.Y, self.Z_ep = self.ep_approx.fit_EP()
-        print "Aqui toy"
         self.trbetaYYT = np.sum(np.square(self.Y)*self.beta)
         self._computations()
