mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-08 11:32:39 +02:00
a more efficient implementation of prediction with uncertain inputs
This commit is contained in:
parent
e6261c787c
commit
8689b3dfd0
1 changed files with 27 additions and 19 deletions
|
|
@@ -160,32 +160,40 @@ class SparseGP(GP):
|
||||||
else:
|
else:
|
||||||
psi0_star = kern.psi0(self._predictive_variable, Xnew)
|
psi0_star = kern.psi0(self._predictive_variable, Xnew)
|
||||||
psi1_star = kern.psi1(self._predictive_variable, Xnew)
|
psi1_star = kern.psi1(self._predictive_variable, Xnew)
|
||||||
#psi2_star = kern.psi2(self.Z, Xnew) # Only possible if we get NxMxM psi2 out of the code.
|
psi2_star = kern.psi2n(self._predictive_variable, Xnew)
|
||||||
la = self.posterior.woodbury_vector
|
la = self.posterior.woodbury_vector
|
||||||
mu = np.dot(psi1_star, la) # TODO: dimensions?
|
mu = np.dot(psi1_star, la) # TODO: dimensions?
|
||||||
|
N,M,D = psi0_star.shape[0],psi1_star.shape[1], la.shape[1]
|
||||||
|
|
||||||
if full_cov:
|
if full_cov:
|
||||||
raise NotImplementedError("Full covariance for Sparse GP predicted with uncertain inputs not implemented yet.")
|
raise NotImplementedError("Full covariance for Sparse GP predicted with uncertain inputs not implemented yet.")
|
||||||
var = np.empty((Xnew.shape[0], la.shape[1], la.shape[1]))
|
var = np.zeros((Xnew.shape[0], la.shape[1], la.shape[1]))
|
||||||
di = np.diag_indices(la.shape[1])
|
di = np.diag_indices(la.shape[1])
|
||||||
else:
|
else:
|
||||||
var = np.empty((Xnew.shape[0], la.shape[1]))
|
tmp = psi2_star - psi1_star[:,:,None]*psi1_star[:,None,:]
|
||||||
|
var = (tmp.reshape(-1,M).dot(la).reshape(N,M,D)*la[None,:,:]).sum(1) + psi0_star[:,None]
|
||||||
for i in range(Xnew.shape[0]):
|
if self.posterior.woodbury_inv.ndim==2:
|
||||||
_mu, _var = Xnew.mean.values[[i]], Xnew.variance.values[[i]]
|
var += -psi2_star.reshape(N,-1).dot(self.posterior.woodbury_inv.flat)[:,None]
|
||||||
psi2_star = kern.psi2(self._predictive_variable, NormalPosterior(_mu, _var))
|
|
||||||
tmp = (psi2_star[:, :] - psi1_star[[i]].T.dot(psi1_star[[i]]))
|
|
||||||
|
|
||||||
var_ = mdot(la.T, tmp, la)
|
|
||||||
p0 = psi0_star[i]
|
|
||||||
t = np.atleast_3d(self.posterior.woodbury_inv)
|
|
||||||
t2 = np.trace(t.T.dot(psi2_star), axis1=1, axis2=2)
|
|
||||||
|
|
||||||
if full_cov:
|
|
||||||
var_[di] += p0
|
|
||||||
var_[di] += -t2
|
|
||||||
var[i] = var_
|
|
||||||
else:
|
else:
|
||||||
var[i] = np.diag(var_)+p0-t2
|
var += -psi2_star.reshape(N,-1).dot(self.posterior.woodbury_inv.reshape(-1,D))
|
||||||
|
assert np.all(var>=-1e-5), "The predicted variance goes negative!: "+str(var)
|
||||||
|
var = np.clip(var,1e-15,np.inf)
|
||||||
|
|
||||||
|
# for i in range(Xnew.shape[0]):
|
||||||
|
# _mu, _var = Xnew.mean.values[[i]], Xnew.variance.values[[i]]
|
||||||
|
# psi2_star = kern.psi2(self._predictive_variable, NormalPosterior(_mu, _var))
|
||||||
|
# tmp = (psi2_star[:, :] - psi1_star[[i]].T.dot(psi1_star[[i]]))
|
||||||
|
#
|
||||||
|
# var_ = mdot(la.T, tmp, la)
|
||||||
|
# p0 = psi0_star[i]
|
||||||
|
# t = np.atleast_3d(self.posterior.woodbury_inv)
|
||||||
|
# t2 = np.trace(t.T.dot(psi2_star), axis1=1, axis2=2)
|
||||||
|
#
|
||||||
|
# if full_cov:
|
||||||
|
# var_[di] += p0
|
||||||
|
# var_[di] += -t2
|
||||||
|
# var[i] = var_
|
||||||
|
# else:
|
||||||
|
# var[i] = np.diag(var_)+p0-t2
|
||||||
|
|
||||||
return mu, var
|
return mu, var
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue