Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-10 12:32:40 +02:00
[var dtc missing] deleted code for missing data inference
This commit is contained in:
parent f916717aad
commit 894829ffeb
1 changed file with 0 additions and 198 deletions
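The removed `VarDTCMissingData` class implements the variational sparse-GP (DTC) bound for data in which some outputs are missing: each output column carries a boolean mask of its observed rows, and the bound, gradients, and Woodbury factors are accumulated column by column over those rows only. A minimal sketch of that masking idea in plain NumPy (toy arrays for illustration, not GPy code):

    import numpy as np

    # Hypothetical toy data: 5 inputs, 3 output dimensions, a few entries missing.
    Y = np.random.randn(5, 3)
    Y[[0, 3], 1] = np.nan
    Y[4, 2] = np.nan

    observed = ~np.isnan(Y)                # True where a value is present
    for d in range(Y.shape[1]):
        y_d = Y[observed[:, d], d:d+1]     # observed rows of column d only
        # ...column d contributes to the bound using y_d and the matching rows of X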
@@ -182,204 +182,6 @@ class VarDTC(LatentFunctionInference):
        post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
        return post, log_marginal, grad_dict

class VarDTCMissingData(LatentFunctionInference):
    const_jitter = 1e-10
    def __init__(self, limit=1, inan=None):
        from ...util.caching import Cacher
        self._Y = Cacher(self._subarray_computations, limit)
        if inan is not None: self._inan = ~inan
        else: self._inan = None
        pass

    def set_limit(self, limit):
        self._Y.limit = limit

    def __getstate__(self):
        # has to be overridden, as Cacher objects cannot be pickled.
        return self._Y.limit, self._inan

    def __setstate__(self, state):
        # has to be overridden, as Cacher objects cannot be pickled.
        from ...util.caching import Cacher
        self.limit = state[0]
        self._inan = state[1]
        self._Y = Cacher(self._subarray_computations, self.limit)

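`__getstate__` and `__setstate__` are overridden because the `Cacher` cannot be pickled: only the cache limit and the NaN mask are stored, and the cache is rebuilt on load. A self-contained sketch of the same pattern, with `functools.lru_cache` as a stand-in for GPy's `Cacher` (the class below is hypothetical, for illustration only):

    import pickle
    from functools import lru_cache

    class CachedThing(object):
        """Rebuild an unpicklable cache on unpickling instead of saving it."""
        def __init__(self, limit=1):
            self.limit = limit
            self._make_cache()

        def _make_cache(self):
            self._square = lru_cache(maxsize=self.limit)(lambda x: x * x)

        def __getstate__(self):
            return {'limit': self.limit}      # drop the cache itself

        def __setstate__(self, state):
            self.limit = state['limit']
            self._make_cache()                # recreate it on load

    thing = pickle.loads(pickle.dumps(CachedThing(limit=3)))
    assert thing._square(4) == 16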
    def _subarray_computations(self, Y):
        if self._inan is None:
            inan = np.isnan(Y)
            has_none = inan.any()
            self._inan = ~inan
            inan = self._inan
        else:
            inan = self._inan
            has_none = True
        if has_none:
            #print "caching missing data slices, this can take several minutes depending on the number of unique dimensions of the data..."
            #csa = common_subarrays(inan, 1)
            size = Y.shape[1]
            #logger.info('preparing subarrays {:3.3%}'.format((i+1.)/size))
            Ys = []
            next_ten = [0.]
            count = itertools.count()
            for v, y in itertools.izip(inan.T, Y.T[:,:,None]):
                i = count.next()
                if ((i+1.)/size) >= next_ten[0]:
                    logger.info('preparing subarrays {:>6.1%}'.format((i+1.)/size))
                    next_ten[0] += .1
                Ys.append(y[v,:])

            next_ten = [0.]
            count = itertools.count()
            def trace(y):
                i = count.next()
                if ((i+1.)/size) >= next_ten[0]:
                    logger.info('preparing traces {:>6.1%}'.format((i+1.)/size))
                    next_ten[0] += .1
                y = y[inan[:,i],i:i+1]
                return np.einsum('ij,ij->', y,y)
            traces = [trace(Y) for _ in xrange(size)]
            return Ys, traces
        else:
            self._subarray_indices = [[slice(None),slice(None)]]
            return [Y], [(Y**2).sum()]

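`_subarray_computations` caches, for each output column, the sub-vector of observed values and its squared norm (the `trYYT` terms used later in `inference`). The loop relies on Python 2 constructs (`itertools.izip`, `count.next()`, `xrange`); a Python 3 sketch of the same computation, without the progress logging (the function name is illustrative, not GPy API):

    import numpy as np

    def subarrays_and_traces(Y):
        """Per output column: the observed sub-vector and its trace y.T @ y."""
        observed = ~np.isnan(Y)
        Ys, traces = [], []
        for d in range(Y.shape[1]):
            y = Y[observed[:, d], d:d+1]
            Ys.append(y)
            traces.append(float(np.einsum('ij,ij->', y, y)))
        return Ys, traces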
    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None):
        if isinstance(X, VariationalPosterior):
            uncertain_inputs = True
            psi0_all = kern.psi0(Z, X)
            psi1_all = kern.psi1(Z, X)
        else:
            uncertain_inputs = False
            psi0_all = kern.Kdiag(X)
            psi1_all = kern.K(X, Z)

        Ys, traces = self._Y(Y)
        beta_all = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
        het_noise = beta_all.size != 1

        num_inducing = Z.shape[0]

        dL_dpsi0_all = np.zeros(Y.shape[0])
        dL_dpsi1_all = np.zeros((Y.shape[0], num_inducing))
        if uncertain_inputs:
            dL_dpsi2_all = np.zeros((Y.shape[0], num_inducing, num_inducing))

        dL_dR = 0
        woodbury_vector = np.zeros((num_inducing, Y.shape[1]))
        woodbury_inv_all = np.zeros((num_inducing, num_inducing, Y.shape[1]))
        dL_dKmm = 0
        log_marginal = 0

        Kmm = kern.K(Z).copy()
        diag.add(Kmm, self.const_jitter)
        #factor Kmm
        if Lm is None:
            Lm = jitchol(Kmm)
        if uncertain_inputs: LmInv = dtrtri(Lm)

        size = len(Ys)
        next_ten = 0
        for i, [y, v, trYYT] in enumerate(itertools.izip(Ys, self._inan.T, traces)):
            if ((i+1.)/size) >= next_ten:
                logger.info('inference {:> 6.1%}'.format((i+1.)/size))
                next_ten += .1
            if het_noise: beta = beta_all[i]
            else: beta = beta_all
            VVT_factor = (y*beta)
            output_dim = 1#len(ind)

            psi0 = psi0_all[v]
            psi1 = psi1_all[v, :]
            if uncertain_inputs:
                psi2 = kern.psi2(Z, X[v, :])
            else: psi2 = None
            num_data = psi1.shape[0]

            if uncertain_inputs:
                if het_noise: psi2_beta = psi2 * (beta.flatten().reshape(num_data, 1, 1)).sum(0)
                else: psi2_beta = psi2 * (beta)
                A = LmInv.dot(psi2_beta.dot(LmInv.T))
            else:
                if het_noise: tmp = psi1 * (np.sqrt(beta.reshape(num_data, 1)))
                else: tmp = psi1 * (np.sqrt(beta))
                tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
                A = tdot(tmp) #print A.sum()

            # factor B
            B = np.eye(num_inducing) + A
            LB = jitchol(B)

            psi1Vf = psi1.T.dot(VVT_factor)
            tmp, _ = dtrtrs(Lm, psi1Vf, lower=1, trans=0)
            _LBi_Lmi_psi1Vf, _ = dtrtrs(LB, tmp, lower=1, trans=0)
            tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1Vf, lower=1, trans=1)
            Cpsi1Vf, _ = dtrtrs(Lm, tmp, lower=1, trans=1)

            # data fit and derivative of L w.r.t. Kmm
            delit = tdot(_LBi_Lmi_psi1Vf)
            data_fit = np.trace(delit)
            DBi_plus_BiPBi = backsub_both_sides(LB, output_dim * np.eye(num_inducing) + delit)
            delit = -0.5 * DBi_plus_BiPBi
            delit += -0.5 * B * output_dim
            delit += output_dim * np.eye(num_inducing)
            dL_dKmm += backsub_both_sides(Lm, delit)

            # derivatives of L w.r.t. psi
            dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm,
                                                            VVT_factor, Cpsi1Vf, DBi_plus_BiPBi,
                                                            psi1, het_noise, uncertain_inputs)

            dL_dpsi0_all[v] += dL_dpsi0
            dL_dpsi1_all[v, :] += dL_dpsi1
            if uncertain_inputs:
                dL_dpsi2_all[v, :, :] += dL_dpsi2

            # log marginal likelihood
            log_marginal += _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
                                                             psi0, A, LB, trYYT, data_fit, VVT_factor)

            #put the gradients in the right places
            dL_dR += _compute_dL_dR(likelihood,
                                    het_noise, uncertain_inputs, LB,
                                    _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
                                    psi0, psi1, beta,
                                    data_fit, num_data, output_dim, trYYT, Y)

            #if full_VVT_factor:
            woodbury_vector[:, i:i+1] = Cpsi1Vf
            #else:
            #    print 'foobar'
            #    tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
            #    tmp, _ = dpotrs(LB, tmp, lower=1)
            #    woodbury_vector[:, ind] = dtrtrs(Lm, tmp, lower=1, trans=1)[0]

            #import ipdb;ipdb.set_trace()
            Bi, _ = dpotri(LB, lower=1)
            symmetrify(Bi)
            Bi = -dpotri(LB, lower=1)[0]
            diag.add(Bi, 1)
            woodbury_inv_all[:, :, i:i+1] = backsub_both_sides(Lm, Bi)[:,:,None]

        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)

        # gradients:
        if uncertain_inputs:
            grad_dict = {'dL_dKmm': dL_dKmm,
                         'dL_dpsi0':dL_dpsi0_all,
                         'dL_dpsi1':dL_dpsi1_all,
                         'dL_dpsi2':dL_dpsi2_all,
                         'dL_dthetaL':dL_dthetaL}
        else:
            grad_dict = {'dL_dKmm': dL_dKmm,
                         'dL_dKdiag':dL_dpsi0_all,
                         'dL_dKnm':dL_dpsi1_all,
                         'dL_dthetaL':dL_dthetaL}

        post = Posterior(woodbury_inv=woodbury_inv_all, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)

        return post, log_marginal, grad_dict

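The returned `Posterior` holds one Woodbury vector column and one num_inducing x num_inducing Woodbury inverse slab per output dimension. In the usual sparse-GP posterior parameterisation these give, roughly, a predictive mean `Kstar_m @ woodbury_vector` and a variance `Kstar_diag - Kstar_m @ woodbury_inv @ Kstar_m.T` per column; a shape-only sketch of how per-column slabs would be consumed (random placeholder arrays, names illustrative and not GPy's API):

    import numpy as np

    rng = np.random.default_rng(1)
    M, D, T = 6, 3, 4                          # inducing points, outputs, test points
    woodbury_vector = rng.standard_normal((M, D))
    woodbury_inv_all = rng.standard_normal((M, M, D))
    Kstar_m = rng.standard_normal((T, M))      # cross-covariance: test vs. inducing
    Kstar_diag = np.ones(T)                    # prior variances at the test points

    mean = Kstar_m @ woodbury_vector           # (T, D) predictive mean
    var = Kstar_diag[:, None] - np.einsum('tm,mnd,tn->td',
                                          Kstar_m, woodbury_inv_all, Kstar_m)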
def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs):
    dL_dpsi0 = -0.5 * output_dim * (beta[:,None] * np.ones([num_data, 1])).flatten()
    dL_dpsi1 = np.dot(VVT_factor, Cpsi1Vf.T)
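For reference, the chain of triangular solves that produces `Cpsi1Vf` in `inference` above evaluates `(Kmm + Psi1.T @ (beta * Psi1))^-1 @ Psi1.T @ (beta * y)` through the two Cholesky factors `Lm` and `LB`, without forming an explicit inverse; the same `Cpsi1Vf` then enters `dL_dpsi1` here. A small numerical check of that identity, with SciPy's `solve_triangular` standing in for GPy's `dtrtrs` (toy data, scalar noise precision, assuming SciPy is available):

    import numpy as np
    from scipy.linalg import cholesky, solve_triangular

    rng = np.random.default_rng(0)
    M, N = 4, 20
    Psi1 = rng.standard_normal((N, M))
    Kmm = Psi1.T @ Psi1 / N + np.eye(M)        # some SPD matrix
    y = rng.standard_normal((N, 1))
    beta = 2.0                                 # scalar noise precision

    Lm = cholesky(Kmm, lower=True)
    tmp = solve_triangular(Lm, (Psi1 * np.sqrt(beta)).T, lower=True)
    A = tmp @ tmp.T                            # Lm^-1 Psi1.T beta Psi1 Lm^-T
    LB = cholesky(np.eye(M) + A, lower=True)

    psi1Vf = Psi1.T @ (beta * y)
    t = solve_triangular(Lm, psi1Vf, lower=True)
    t = solve_triangular(LB, t, lower=True)
    t = solve_triangular(LB, t, lower=True, trans='T')
    Cpsi1Vf = solve_triangular(Lm, t, lower=True, trans='T')

    direct = np.linalg.solve(Kmm + beta * Psi1.T @ Psi1, psi1Vf)
    assert np.allclose(Cpsi1Vf, direct)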