Optimize the missing-data model; it still needs tidying but is now much faster

This commit is contained in:
Alan Saul 2015-08-27 17:56:08 +03:00
parent 12c335c62e
commit 4143f00540
10 changed files with 275 additions and 195 deletions

View file

@@ -80,45 +80,9 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
"""Get the gradients of the posterior distribution of X in its specific form."""
return X.mean.gradient, X.variance.gradient
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kw):
posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices, **kw)
if self.has_uncertain_inputs():
current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
variational_posterior=X,
Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
dL_dpsi1=grad_dict['dL_dpsi1'],
dL_dpsi2=grad_dict['dL_dpsi2'])
else:
current_values['Xgrad'] = self.kern.gradients_X(grad_dict['dL_dKnm'], X, Z)
current_values['Xgrad'] += self.kern.gradients_X_diag(grad_dict['dL_dKdiag'], X)
if subset_indices is not None:
value_indices['Xgrad'] = subset_indices['samples']
kl_fctr = self.kl_factr
if self.has_uncertain_inputs():
if self.missing_data:
d = self.output_dim
log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
else:
log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)
# Subsetting Variational Posterior objects, makes the gradients
# empty. We need them to be 0 though:
X.mean.gradient[:] = 0
X.variance.gradient[:] = 0
self.variational_prior.update_gradients_KL(X)
if self.missing_data:
current_values['meangrad'] += kl_fctr*X.mean.gradient/d
current_values['vargrad'] += kl_fctr*X.variance.gradient/d
else:
current_values['meangrad'] += kl_fctr*X.mean.gradient
current_values['vargrad'] += kl_fctr*X.variance.gradient
if subset_indices is not None:
value_indices['meangrad'] = subset_indices['samples']
value_indices['vargrad'] = subset_indices['samples']
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, psi0=None, psi1=None, psi2=None, **kw):
posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm,
psi0=psi0, psi1=psi1, psi2=psi2, subset_indices=subset_indices, **kw)
return posterior, log_marginal_likelihood, grad_dict, current_values, value_indices
def _outer_values_update(self, full_values):
@@ -127,6 +91,47 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
E.g. set the gradients of parameters, etc.
"""
super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
current_values = full_values
grad_dict = current_values
if self.has_uncertain_inputs():
current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
variational_posterior=self.X,
Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'],
dL_dpsi1=grad_dict['dL_dpsi1'],
dL_dpsi2=grad_dict['dL_dpsi2'],
psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
else:
current_values['Xgrad'] = self.kern.gradients_X(grad_dict['dL_dKnm'], self.X, self.Z)
current_values['Xgrad'] += self.kern.gradients_X_diag(grad_dict['dL_dKdiag'], self.X)
if subset_indices is not None:
value_indices['Xgrad'] = subset_indices['samples']
kl_fctr = self.kl_factr
if self.has_uncertain_inputs():
#if self.missing_data:
#d = self.output_dim
#log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)/d
#else:
self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
# Subsetting Variational Posterior objects, makes the gradients
# empty. We need them to be 0 though:
self.X.mean.gradient[:] = 0
self.X.variance.gradient[:] = 0
self.variational_prior.update_gradients_KL(self.X)
#if self.missing_data:
#current_values['meangrad'] += kl_fctr*self.X.mean.gradient/d
#current_values['vargrad'] += kl_fctr*self.X.variance.gradient/d
#else:
current_values['meangrad'] += kl_fctr*self.X.mean.gradient
current_values['vargrad'] += kl_fctr*self.X.variance.gradient
#if subset_indices is not None:
#value_indices['meangrad'] = subset_indices['samples']
#value_indices['vargrad'] = subset_indices['samples']
full_values = current_values
if self.has_uncertain_inputs():
self.X.mean.gradient = full_values['meangrad']
self.X.variance.gradient = full_values['vargrad']
@@ -134,11 +139,13 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
self.X.gradient = full_values['Xgrad']
def _outer_init_full_values(self):
if self.has_uncertain_inputs():
return dict(meangrad=np.zeros(self.X.mean.shape),
vargrad=np.zeros(self.X.variance.shape))
else:
return dict(Xgrad=np.zeros(self.X.shape))
full_values = super(BayesianGPLVMMiniBatch, self)._outer_init_full_values()
#if self.has_uncertain_inputs():
#return dict(meangrad=np.zeros(self.X.mean.shape),
#vargrad=np.zeros(self.X.variance.shape))
#else:
#return dict(Xgrad=np.zeros(self.X.shape))
return full_values
def parameters_changed(self):
super(BayesianGPLVMMiniBatch,self).parameters_changed()

View file

@@ -44,7 +44,7 @@ class SparseGPMiniBatch(SparseGP):
def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None,
name='sparse gp', Y_metadata=None, normalizer=False,
missing_data=False, stochastic=False, batchsize=1):
# pick a sensible inference method
if inference_method is None:
if isinstance(likelihood, likelihoods.Gaussian):
@@ -80,7 +80,7 @@ class SparseGPMiniBatch(SparseGP):
def has_uncertain_inputs(self):
return isinstance(self.X, VariationalPosterior)
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kwargs):
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, psi0=None, psi1=None, psi2=None, missing_inds=None, **kwargs):
"""
This is the standard part, which usually belongs in parameters_changed.
@@ -99,46 +99,22 @@ class SparseGPMiniBatch(SparseGP):
like them into this dictionary for inner use of the indices inside the
algorithm.
"""
try:
posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=None, **kwargs)
except:
posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata)
if psi2 is None:
psi2_sum_n = None
else:
psi2_sum_n = psi2.sum(axis=0)
posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=None, psi0=psi0, psi1=psi1, psi2=psi2_sum_n, **kwargs)
current_values = {}
likelihood.update_gradients(grad_dict['dL_dthetaL'])
current_values['likgrad'] = likelihood.gradient.copy()
if subset_indices is None:
subset_indices = {}
if isinstance(X, VariationalPosterior):
#gradients wrt kernel
dL_dKmm = grad_dict['dL_dKmm']
kern.update_gradients_full(dL_dKmm, Z, None)
current_values['kerngrad'] = kern.gradient.copy()
kern.update_gradients_expectations(variational_posterior=X,
Z=Z,
dL_dpsi0=grad_dict['dL_dpsi0'],
dL_dpsi1=grad_dict['dL_dpsi1'],
dL_dpsi2=grad_dict['dL_dpsi2'])
current_values['kerngrad'] += kern.gradient
current_values['dL_dpsi0'] = grad_dict['dL_dpsi0']
current_values['dL_dpsi1'] = grad_dict['dL_dpsi1']
current_values['dL_dpsi2'] = grad_dict['dL_dpsi2']
current_values['dL_dKmm'] = grad_dict['dL_dKmm']
#gradients wrt Z
current_values['Zgrad'] = kern.gradients_X(dL_dKmm, Z)
current_values['Zgrad'] += kern.gradients_Z_expectations(
grad_dict['dL_dpsi0'],
grad_dict['dL_dpsi1'],
grad_dict['dL_dpsi2'],
Z=Z,
variational_posterior=X)
else:
#gradients wrt kernel
kern.update_gradients_diag(grad_dict['dL_dKdiag'], X)
current_values['kerngrad'] = kern.gradient.copy()
kern.update_gradients_full(grad_dict['dL_dKnm'], X, Z)
current_values['kerngrad'] += kern.gradient
kern.update_gradients_full(grad_dict['dL_dKmm'], Z, None)
current_values['kerngrad'] += kern.gradient
#gradients wrt Z
current_values['Zgrad'] = kern.gradients_X(grad_dict['dL_dKmm'], Z)
current_values['Zgrad'] += kern.gradients_X(grad_dict['dL_dKnm'].T, Z, X)
#current_values = grad_dict
return posterior, log_marginal_likelihood, grad_dict, current_values, subset_indices
def _inner_take_over_or_update(self, full_values=None, current_values=None, value_indices=None):
@@ -192,9 +168,43 @@ class SparseGPMiniBatch(SparseGP):
Here you put the values, which were collected before in the right places.
E.g. set the gradients of parameters, etc.
"""
self.likelihood.gradient = full_values['likgrad']
self.kern.gradient = full_values['kerngrad']
self.Z.gradient = full_values['Zgrad']
grad_dict = full_values
current_values = full_values
#current_values = {}
if isinstance(self.X, VariationalPosterior):
#gradients wrt kernel
dL_dKmm = grad_dict['dL_dKmm']
self.kern.update_gradients_full(dL_dKmm, self.Z, None)
current_values['kerngrad'] = self.kern.gradient.copy()
self.kern.update_gradients_expectations(variational_posterior=self.X,
Z=self.Z,
dL_dpsi0=grad_dict['dL_dpsi0'],
dL_dpsi1=grad_dict['dL_dpsi1'],
dL_dpsi2=grad_dict['dL_dpsi2'])
current_values['kerngrad'] += self.kern.gradient
#gradients wrt Z
current_values['Zgrad'] = self.kern.gradients_X(dL_dKmm, self.Z)
current_values['Zgrad'] += self.kern.gradients_Z_expectations(
grad_dict['dL_dpsi0'],
grad_dict['dL_dpsi1'],
grad_dict['dL_dpsi2'],
Z=self.Z,
variational_posterior=self.X)
else:
#gradients wrt kernel
kern.update_gradients_diag(grad_dict['dL_dKdiag'], self.X)
current_values['kerngrad'] = self.kern.gradient.copy()
kern.update_gradients_full(grad_dict['dL_dKnm'], self.X, self.Z)
current_values['kerngrad'] += kern.gradient
kern.update_gradients_full(grad_dict['dL_dKmm'], self.Z, None)
current_values['kerngrad'] += kern.gradient
#gradients wrt Z
current_values['Zgrad'] = kern.gradients_X(grad_dict['dL_dKmm'], self.Z)
current_values['Zgrad'] += kern.gradients_X(grad_dict['dL_dKnm'].T, self.Z, self.X)
self.likelihood.gradient = current_values['likgrad']
self.kern.gradient = current_values['kerngrad']
self.Z.gradient = current_values['Zgrad']
def _outer_init_full_values(self):
"""
@@ -209,7 +219,8 @@ class SparseGPMiniBatch(SparseGP):
to initialize the gradients for the mean and the variance in order to
have the full gradient for indexing)
"""
return {}
return {'dL_dpsi0': np.zeros(self.X.shape[0]),
'dL_dpsi1': np.zeros((self.X.shape[0], self.Z.shape[0]))}
def _outer_loop_for_missing_data(self):
Lm = None
@@ -230,20 +241,35 @@ class SparseGPMiniBatch(SparseGP):
message = m_f(-1)
print(message, end=' ')
for d, ninan in self.stochastics.d:
#Compute the psi statistics for N once, but don't sum out N in psi2
self.kern.return_psi2_n = True
psi0 = self.kern.psi0(self.Z, self.X)
psi1 = self.kern.psi1(self.Z, self.X)
psi2 = self.kern.psi2(self.Z, self.X)
self.psi0 = psi0
self.psi1 = psi1
self.psi2 = psi2
for d, ninan in self.stochastics.d:
if not self.stochastics:
print(' '*(len(message)) + '\r', end=' ')
message = m_f(d)
print(message, end=' ')
psi0ni = psi0[ninan]
psi1ni = psi1[ninan]
psi2ni = psi2[ninan]
posterior, log_marginal_likelihood, \
grad_dict, current_values, value_indices = self._inner_parameters_changed(
self.kern, self.X[ninan],
self.Z, self.likelihood,
self.Y_normalized[ninan][:, d], self.Y_metadata,
Lm, dL_dKmm,
subset_indices=dict(outputs=d, samples=ninan))
subset_indices=dict(outputs=d, samples=ninan,
dL_dpsi0=ninan,
dL_dpsi1=ninan),
psi0=psi0ni, psi1=psi1ni, psi2=psi2ni)
self._inner_take_over_or_update(self.full_values, current_values, value_indices)
self._inner_values_update(current_values)
@@ -253,6 +279,7 @@ class SparseGPMiniBatch(SparseGP):
woodbury_inv[:, :, d] = posterior.woodbury_inv[:,:,None]
woodbury_vector[:, d] = posterior.woodbury_vector
self._log_marginal_likelihood += log_marginal_likelihood
if not self.stochastics:
print('')