Merge branch 'devel' of github.com:/sheffieldml/GPy into devel

James Hensman 2015-02-11 11:49:59 +00:00
commit 7920ff0b4d
7 changed files with 53 additions and 21 deletions

@@ -127,7 +127,7 @@ class Coregionalize(Kern):
                 config.set('weave', 'working', 'False')
                 dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2)
         else:
-            dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2)
+            dL_dK_small = self._gradient_reduce_numpy(dL_dK, index, index2)
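Note: the old `else` branch (taken when weave is disabled) still dispatched to the weave implementation, so turning weave off had no effect. A minimal sketch of the intended dispatch, assuming the surrounding method provides `config`, `dL_dK`, `index` and `index2` as in the diff:

    if config.getboolean('weave', 'working'):
        # fast C path; on compile failure the flag is flipped off above
        dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2)
    else:
        # pure-numpy fallback, now actually reached when weave is off
        dL_dK_small = self._gradient_reduce_numpy(dL_dK, index, index2)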

@@ -178,7 +178,7 @@ class Likelihood(Parameterized):
         stop
         dF_dtheta = None # Not yet implemented
-        return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), None
+        return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), dF_dtheta
     def predictive_mean(self, mu, variance, Y_metadata=None):
         """

@@ -83,8 +83,8 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         """Get the gradients of the posterior distribution of X in its specific form."""
         return X.mean.gradient, X.variance.gradient
-    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None):
-        posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices)
+    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kw):
+        posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices, **kw)
         if self.has_uncertain_inputs():
             current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
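The subclass now accepts and forwards arbitrary keyword arguments, so extra options can reach the base class (and, per the next file, the inference method) without every override naming them. A generic, self-contained sketch of the pattern:

    class Base(object):
        def _inner(self, x, **kwargs):
            # kwargs carries options meant for collaborators further down
            return x, kwargs

    class Child(Base):
        def _inner(self, x, **kw):
            # forward everything unchanged, so adding an option upstream
            # requires no signature change here
            return super(Child, self)._inner(x, **kw)

    print(Child()._inner(1, step_length=0.1))  # hypothetical option name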

@@ -97,7 +97,7 @@ Created on 3 Nov 2014
     def has_uncertain_inputs(self):
         return isinstance(self.X, VariationalPosterior)
-    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None):
+    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kwargs):
         """
         This is the standard part, which usually belongs in parameters_changed.
@@ -117,7 +117,7 @@ Created on 3 Nov 2014
         algorithm.
         """
         try:
-            posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=None)
+            posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=None, **kwargs)
         except:
             posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata)
         current_values = {}
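The bare `except:` here works as a signature-compatibility shim: inference methods that do not accept `Lm`, `dL_dKmm` or the new `**kwargs` fall back to the plain call, at the cost of also swallowing real errors from the first attempt. A narrower sketch of the same idea:

    def call_inference(inference, *args, **extras):
        # Prefer the richer signature; fall back only when the method
        # genuinely does not accept the extra keywords.
        try:
            return inference(*args, **extras)
        except TypeError:  # e.g. "unexpected keyword argument"
            return inference(*args)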

@@ -172,7 +172,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
         if hasattr(model,"Z"):
             #Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims]
             Zu = Z[:,free_dims]
-            plots['inducing_inputs'] = ax.plot(Zu[:,free_dims[0]], Zu[:,free_dims[1]], 'wo')
+            plots['inducing_inputs'] = ax.plot(Zu[:,0], Zu[:,1], 'wo')
     else:
         raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
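`Zu = Z[:,free_dims]` has already selected the two free columns, so indexing the result with `free_dims[0]`/`free_dims[1]` again was a double-indexing bug: it picks the wrong columns, or raises when `free_dims` contains an index >= 2. A quick numpy demonstration with made-up data:

    import numpy as np
    Z = np.arange(12).reshape(4, 3)
    free_dims = np.array([1, 2])
    Zu = Z[:, free_dims]       # shape (4, 2): the selected columns are now 0 and 1
    x, y = Zu[:, 0], Zu[:, 1]  # correct
    # Zu[:, free_dims[1]] asks for column 2 of a 2-column array -> IndexError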

@@ -0,0 +1,37 @@
+import numpy as np
+import scipy as sp
+
+from ..util.linalg import jitchol
+
+class LinalgTests(np.testing.TestCase):
+    def setUp(self):
+        #Create PD matrix
+        A = np.random.randn(20,100)
+        self.A = A.dot(A.T)
+        #compute Eigdecomp
+        vals, vectors = np.linalg.eig(self.A)
+        #Set smallest eigenval to be negative with 5 rounds worth of jitter
+        vals[vals.argmin()] = 0
+        default_jitter = 1e-6*np.mean(vals)
+        vals[vals.argmin()] = -default_jitter*(10**3.5)
+        self.A_corrupt = (vectors * vals).dot(vectors.T)
+
+    def test_jitchol_success(self):
+        """
+        Expect 5 rounds of jitter to be added and for the recovered matrix to be
+        identical to the corrupted matrix apart from the jitter added to the diagonal
+        """
+        L = jitchol(self.A_corrupt, maxtries=5)
+        A_new = L.dot(L.T)
+        diff = A_new - self.A_corrupt
+        np.testing.assert_allclose(diff, np.eye(A_new.shape[0])*np.diag(diff).mean(), atol=1e-13)
+
+    def test_jitchol_failure(self):
+        try:
+            """
+            Expecting an exception to be thrown as we expect it to require
+            5 rounds of jitter to be added to enforce PDness
+            """
+            jitchol(self.A_corrupt, maxtries=4)
+            return False
+        except sp.linalg.LinAlgError:
+            return True
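The setUp constants are tuned so that exactly five jitter rounds are needed: `jitchol` starts from jitter = 1e-6 times the diagonal mean (approximately `np.mean(vals)` here, since the trace is the eigenvalue sum) and multiplies by 10 after each failed factorization, so attempt k uses default_jitter * 10**(k-1). A short arithmetic check of why -default_jitter * 10**3.5 lands between attempts 4 and 5:

    # success needs jitter > |smallest eigenvalue| = default_jitter * 10**3.5
    for k in range(1, 6):
        print(k, 10**(k - 1) > 10**3.5)  # False for k <= 4, True for k = 5
    # hence maxtries=5 succeeds while maxtries=4 exhausts its attempts and raises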

@@ -82,6 +82,7 @@ def force_F_ordered(A):
 #    return jitchol(A+np.eye(A.shape[0])*jitter, maxtries-1)
 def jitchol(A, maxtries=5):
     A = np.ascontiguousarray(A)
     L, info = lapack.dpotrf(A, lower=1)
@@ -92,25 +93,19 @@ def jitchol(A, maxtries=5):
         if np.any(diagA <= 0.):
             raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
         jitter = diagA.mean() * 1e-6
-        while maxtries > 0 and np.isfinite(jitter):
+        num_tries = 1
+        while num_tries <= maxtries and np.isfinite(jitter):
             try:
                 L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
+                logging.warning('Added {} rounds of jitter, jitter of {:.10e}\n'.format(num_tries, jitter))
+                return L
             except:
                 jitter *= 10
-            finally:
-                maxtries -= 1
-        raise linalg.LinAlgError, "not positive definite, even with jitter."
+                num_tries += 1
     import traceback
-    try: raise
-    except:
-        logging.warning('\n'.join(['Added jitter of {:.10e}'.format(jitter),
+    logging.warning('\n'.join(['Added {} rounds of jitter, jitter of {:.10e}'.format(num_tries-1, jitter),
         ' in '+traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]]))
-    import ipdb;ipdb.set_trace()
-    return L
+    raise linalg.LinAlgError, "not positive definite, even with jitter."
 # def dtrtri(L, lower=1):
 #     """