more gradient based tomfoolery

James Hensman 2014-01-24 14:15:32 +00:00
parent 7b5e8a9ffc
commit c1a416debc
4 changed files with 4 additions and 6 deletions


@@ -33,7 +33,8 @@ class Model(Parameterized):
         #def dK_d(self, param, dL_dK, X, X2)
         g = np.zeros(self.size)
         try:
-            [g.__setitem__(s, self.gradient_mapping[p]().flat) for p, s in itertools.izip(self._parameters_, self._param_slices_) if not p.is_fixed]
+            #[g.__setitem__(s, self.gradient_mapping[p]().flat) for p, s in itertools.izip(self._parameters_, self._param_slices_) if not p.is_fixed]
+            [g.__setitem__(s, p.gradient.flat) for p, s in itertools.izip(self._parameters_, self._param_slices_) if not p.is_fixed]
         except KeyError:
             raise KeyError, 'Gradient for {} not defined, please specify gradients for parameters to optimize'.format(p.name)
         return g
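The net effect of this hunk: the model now gathers the full gradient vector from each parameter's .gradient attribute instead of looking gradients up in a gradient_mapping dict. A minimal standalone sketch of that gathering pattern (written for Python 3, so zip replaces itertools.izip; the Param class and gather_gradients helper are illustrative stand-ins, not GPy's actual classes):

import numpy as np

class Param(object):
    """Stand-in for a GPy parameter: holds a value and a gradient."""
    def __init__(self, value):
        self.value = np.atleast_1d(np.asarray(value, dtype=float))
        self.gradient = np.zeros_like(self.value)  # filled in by update_gradients_*
        self.is_fixed = False

def gather_gradients(params, param_slices, size):
    # assemble the full gradient vector from per-parameter .gradient
    # arrays, mirroring the new list comprehension in the hunk above
    g = np.zeros(size)
    for p, s in zip(params, param_slices):
        if not p.is_fixed:
            g[s] = p.gradient.flat
    return g

# toy usage: two parameters occupying slices of a 3-vector
variance, lengthscale = Param(1.0), Param([2.0, 0.5])
variance.gradient[:] = 0.3
lengthscale.gradient[:] = [0.1, -0.2]
print(gather_gradients([variance, lengthscale], [slice(0, 1), slice(1, 3)], 3))
# -> [ 0.3  0.1 -0.2]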


@@ -49,7 +49,7 @@ class ExactGaussianInference(object):
         dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)
-        kern.update_gradients_full(dL_dK)
+        kern.update_gradients_full(dL_dK, X)
         likelihood.update_gradients(np.diag(dL_dK))
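Passing X into update_gradients_full is needed because the kernel must evaluate dK/dtheta at the training inputs before chaining with dL_dK. A hedged numpy sketch of the surrounding math (the rbf helper and all variable names here are assumptions for illustration; GPy's own code works with Cholesky factors rather than an explicit inverse):

import numpy as np

def tdot(A):
    # GPy-style helper: A A^T
    return A.dot(A.T)

def rbf(X, variance, lengthscale):
    sq = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
    return variance * np.exp(-0.5 * sq / lengthscale ** 2)

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 1))
Y = rng.normal(size=(5, 1))
variance, lengthscale, noise = 1.0, 0.8, 0.1

K = rbf(X, variance, lengthscale) + noise * np.eye(5)
Wi = np.linalg.inv(K)                           # toy stand-in for the cached K^{-1}
alpha = Wi.dot(Y)
dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)   # the quantity handed to the kernel

# chain rule for the kernel variance: dL/dvariance = sum(dL_dK * dK/dvariance),
# and dK/dvariance depends on X -- hence the new argument
dK_dvar = rbf(X, variance, lengthscale) / variance
print(np.sum(dL_dK * dK_dvar))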


@@ -18,7 +18,6 @@ class Posterior(object):
     """
     log_marginal: log p(Y|X)
     dL_dK: d/dK log p(Y|X)
-    dL_dtheta_lik : d/dtheta log p(Y|X) (where theta are the parameters of the likelihood)
     woodbury_chol : a lower triangular matrix L that satisfies posterior_covariance = K - K L^{-T} L^{-1} K
     woodbury_vector : a matrix (or vector, as Nx1 matrix) M which satisfies posterior_mean = K M
     K : the prior covariance (required for lazy computation of various quantities)
@@ -29,7 +28,6 @@ class Posterior(object):
     log_marginal
     dL_dK
-    dL_dtheta_lik
     K (for lazy computation)
 
     You may supply either:
@@ -50,7 +48,6 @@ class Posterior(object):
         #obligatory
         self.log_marginal = log_marginal
         self.dL_dK = dL_dK
-        self.dL_dtheta_lik = dL_dtheta_lik
         self._K = K
         if ((woodbury_chol is not None) and (woodbury_vector is not None) and (K is not None)) or ((mean is not None) and (cov is not None) and (K is not None)):
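For context, the Woodbury quantities described in the docstring satisfy posterior_mean = K M and posterior_covariance = K - K L^{-T} L^{-1} K. A small numpy/scipy check of those identities against the textbook GP posterior (illustrative code only, not GPy's Posterior class; the toy kernel and noise value are assumptions):

import numpy as np
from scipy.linalg import cholesky, cho_solve

rng = np.random.default_rng(1)
X = rng.normal(size=(6, 1))
Y = rng.normal(size=(6, 1))
K = np.exp(-0.5 * (X - X.T) ** 2)   # toy RBF prior covariance
noise = 0.1

B = K + noise * np.eye(6)           # K + Sigma
L = cholesky(B, lower=True)         # plays the role of woodbury_chol
M = cho_solve((L, True), Y)         # plays the role of woodbury_vector

mean = K.dot(M)                              # posterior_mean = K M
cov = K - K.dot(cho_solve((L, True), K))     # K - K L^{-T} L^{-1} K

# the textbook forms K (K+Sigma)^{-1} Y and K - K (K+Sigma)^{-1} K agree:
assert np.allclose(mean, K.dot(np.linalg.solve(B, Y)))
assert np.allclose(cov, K - K.dot(np.linalg.solve(B, K)))
print("Woodbury identities check out")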


@@ -52,7 +52,7 @@ class Gaussian(Likelihood):
     def covariance_matrix(self, Y, Y_metadata=None):
         return np.eye(Y.shape[0]) * self.variance
 
-    def set_gradients(self, partial):
+    def update_gradients(self, partial):
         self.variance.gradient = np.sum(partial)
 
     def _preprocess_values(self, Y):
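The renamed update_gradients sums the diagonal partials because the Gaussian likelihood contributes variance * I to the covariance, so d(K + variance*I)/d variance = I and dL/d variance = trace(dL_dK). A toy sketch of that reasoning (the class below is a stand-in, not GPy's Gaussian, and variance_gradient is a plain attribute rather than a Param):

import numpy as np

class GaussianSketch(object):
    def __init__(self, variance):
        self.variance = float(variance)
        self.variance_gradient = 0.0  # stand-in for self.variance.gradient

    def update_gradients(self, partial):
        # partial is np.diag(dL_dK), matching the call site in the
        # ExactGaussianInference hunk above; summing it gives trace(dL_dK)
        self.variance_gradient = np.sum(partial)

dL_dK = np.array([[0.5, 0.1], [0.1, -0.2]])
lik = GaussianSketch(0.1)
lik.update_gradients(np.diag(dL_dK))
print(lik.variance_gradient)  # trace(dL_dK) = 0.3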