Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-12 05:22:38 +02:00)
Commit 9e64f116d8

Merge branch 'params' of github.com:SheffieldML/GPy into params

Conflicts:
    GPy/inference/latent_function_inference/var_dtc.py

3 changed files with 7 additions and 7 deletions
GPy/inference/latent_function_inference/var_dtc.py:

@@ -65,9 +65,7 @@ class VarDTC(object):
         _, output_dim = Y.shape

         #see whether we've got a different noise variance for each datum
-        #beta = 1./np.fmax(likelihood.variance, 1e-6)
         beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)

         # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
         #self.YYTfactor = self.get_YYTfactor(Y)
         #VVT_factor = self.get_VVTfactor(self.YYTfactor, beta)
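The VVT_factor comment above is about working with a factor instead of the full product. A minimal sketch of that idea, assuming tdot(A) simply means A @ A.T (the toy helper below is illustrative, not GPy's utility):

import numpy as np

# Toy version of tdot: A @ A.T.
def tdot(A):
    return A.dot(A.T)

# If V is tall and skinny, keeping the factor V around is much cheaper than
# materialising the full N x N product VVT = V @ V.T; many downstream
# expressions only need products like VVT @ x = V @ (V.T @ x).
V = np.random.randn(1000, 3)
x = np.random.randn(1000)
full = tdot(V).dot(x)        # forms the 1000 x 1000 matrix first
cheap = V.dot(V.T.dot(x))    # never forms it
print(np.allclose(full, cheap))  # True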
@@ -223,7 +221,7 @@ class VarDTCMissingData(object):
         psi2_all = None

         Ys, traces = self._Y(Y)
-        beta_all = 1./np.fmax(likelihood.variance, 1e-6)
+        beta_all = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
         het_noise = beta_all.size != 1

         import itertools
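Both hunks above replace direct reads of likelihood.variance with likelihood.gaussian_variance(Y_metadata), which lets the likelihood report either one shared noise level or one per datum, as the het_noise check suggests. A minimal sketch of the resulting beta/het_noise logic, using a toy likelihood (ToyGaussianLikelihood and noise_precision are illustrative names, not GPy API):

import numpy as np

# Toy likelihood: gaussian_variance() may return a single shared variance
# or one variance per datum.
class ToyGaussianLikelihood:
    def __init__(self, variance):
        self.variance = np.asarray(variance, dtype=float)

    def gaussian_variance(self, Y_metadata=None):
        # Y_metadata could, e.g., map rows of Y to noise groups; ignored here.
        return self.variance

def noise_precision(likelihood, Y_metadata=None):
    # Same pattern as the diff: clip the variance away from zero, then invert.
    beta = 1. / np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
    het_noise = beta.size != 1   # more than one value => per-datum noise
    return beta, het_noise

print(noise_precision(ToyGaussianLikelihood(0.1)))             # shared noise, het_noise False
print(noise_precision(ToyGaussianLikelihood([0.1, 0.2, 0.])))  # per-datum noise, zero clipped, het_noise True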
@@ -330,18 +328,20 @@ class VarDTCMissingData(object):
             diag.add(Bi, 1)
             woodbury_inv_all[:, :, ind] = backsub_both_sides(Lm, Bi)[:,:,None]

+            dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
+
             # gradients:
             if uncertain_inputs:
                 grad_dict = {'dL_dKmm': dL_dKmm,
                              'dL_dpsi0':dL_dpsi0_all,
                              'dL_dpsi1':dL_dpsi1_all,
                              'dL_dpsi2':dL_dpsi2_all,
-                             'dL_dR':dL_dR}
+                             'dL_dthetaL':dL_dthetaL}
             else:
                 grad_dict = {'dL_dKmm': dL_dKmm,
                              'dL_dKdiag':dL_dpsi0_all,
                              'dL_dKnm':dL_dpsi1_all,
-                             'dL_dR':dL_dR}
+                             'dL_dthetaL':dL_dthetaL}

             #get sufficient things for posterior prediction
             #TODO: do we really want to do this in the loop?
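This hunk converts the gradient with respect to the diagonal noise term (dL_dR) into a gradient with respect to the likelihood's own parameters (dL_dthetaL) before storing it in grad_dict. A hypothetical sketch of that conversion for a Gaussian likelihood with a single variance parameter (the toy class below is illustrative, not GPy's Gaussian likelihood):

import numpy as np

# Sketch of exact_inference_gradients for a toy Gaussian likelihood: chain
# rule from the per-datum noise gradient dL_dR to theta_L = variance.
# Since R = variance * I, dL/dvariance is the sum of the entries of dL_dR.
class ToyGaussianLikelihood:
    def __init__(self, variance):
        self.variance = float(variance)

    def exact_inference_gradients(self, dL_dR, Y_metadata=None):
        return np.sum(dL_dR)

lik = ToyGaussianLikelihood(0.1)
dL_dR = np.array([0.3, -0.1, 0.05])                 # gradient w.r.t. the noise diagonal
dL_dthetaL = lik.exact_inference_gradients(dL_dR)   # gradient w.r.t. the variance itself
grad_dict = {'dL_dthetaL': dL_dthetaL}              # the key the model layer now reads
print(grad_dict)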
@@ -66,7 +66,7 @@ class BayesianGPLVM(SparseGP):
         super(BayesianGPLVM, self).parameters_changed()
         self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)

-        self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict)
+        self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])

         # update for the KL divergence
         self.variational_prior.update_gradients_KL(self.X)
@@ -61,7 +61,7 @@ class SSGPLVM(SparseGP):
         super(SSGPLVM, self).parameters_changed()
         self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)

-        self.X.mean.gradient, self.X.variance.gradient, self.X.binary_prob.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict)
+        self.X.mean.gradient, self.X.variance.gradient, self.X.binary_prob.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])

         # update for the KL divergence
         self.variational_prior.update_gradients_KL(self.X)
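In both model classes the psi-statistic gradients are now passed by name instead of unpacking **self.grad_dict. One plausible motivation: grad_dict now also carries keys such as 'dL_dKmm' and 'dL_dthetaL', and forwarding the whole dict would hand the kernel keywords it may not accept. A minimal sketch of that failure mode, with a stand-in function whose signature is illustrative, not GPy's:

# Stand-in for kern.gradients_qX_expectations: only accepts psi-statistic gradients.
def gradients_qX_expectations(variational_posterior, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
    return "ok"

grad_dict = {'dL_dKmm': 0., 'dL_dpsi0': 1., 'dL_dpsi1': 2., 'dL_dpsi2': 3.,
             'dL_dthetaL': 4.}

# Unpacking the whole dict forwards keys the function does not take.
try:
    gradients_qX_expectations(variational_posterior=None, Z=None, **grad_dict)
except TypeError as err:
    print("old-style call fails:", err)

# Passing only the psi gradients by name, as the diff now does, works.
print(gradients_qX_expectations(variational_posterior=None, Z=None,
                                dL_dpsi0=grad_dict['dL_dpsi0'],
                                dL_dpsi1=grad_dict['dL_dpsi1'],
                                dL_dpsi2=grad_dict['dL_dpsi2']))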