catch linalg errors inside model and more sophisticated non-pd checks

This commit is contained in:
Max Zwiessele 2013-06-07 13:34:45 +01:00
parent 695fad4ed8
commit 924a319b98
2 changed files with 22 additions and 7 deletions

View file

@ -10,6 +10,7 @@ from parameterised import Parameterised
import multiprocessing as mp
import numpy as np
from GPy.core.domains import POSITIVE, REAL
from numpy.linalg.linalg import LinAlgError
# import numdifftools as ndt
class Model(Parameterised):
@ -227,20 +228,32 @@ class Model(Parameterised):
"""
The objective function passed to the optimizer. It combines the likelihood and the priors.
"""
self._set_params_transformed(x)
try:
self._set_params_transformed(x)
except (LinAlgError, ZeroDivisionError) as e:
print "DEBUG: catching linalg, rejecting step" # TODO: delete
return np.inf
return -self.log_likelihood() - self.log_prior()
def objective_function_gradients(self, x):
    """
    Gradients of the objective function passed to the optimizer: the
    negative gradients of the log likelihood plus the log prior, in the
    optimizer's transformed parameter space.

    :param x: parameter vector in the transformed (optimizer) space.
    :returns: gradient array of the objective with respect to x.
    """
    try:
        self._set_params_transformed(x)
    except (LinAlgError, ZeroDivisionError):
        # Numerical failure (e.g. non-positive-definite covariance) while
        # setting parameters: reject this step.  Return a finite, non-zero
        # gradient instead of falling through, since evaluating gradients
        # of the broken model state could re-raise the same error.
        return np.ones_like(x)
    return -self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
def objective_and_gradients(self, x):
    """
    Evaluate the objective function and its gradients in a single call,
    for optimizers that request both at once.

    :param x: parameter vector in the transformed (optimizer) space.
    :returns: tuple (objective value, gradient array).
    """
    try:
        self._set_params_transformed(x)
        obj_f = -self.log_likelihood() - self.log_prior()
        # Gradients are evaluated inside the try as well: on a
        # non-positive-definite covariance they can raise the same
        # LinAlgError, which previously escaped uncaught.
        obj_grads = -self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
    except (LinAlgError, ZeroDivisionError):
        # Numerical failure: reject this step with an infinite objective
        # and a finite, non-zero gradient.
        obj_f = np.inf
        obj_grads = np.ones_like(x)
    return obj_f, obj_grads

View file

def jitchol(A, maxtries=5):
    """
    Cholesky decomposition of A with progressive diagonal 'jitter'.

    If A is not positive definite, increasing multiples of jitter
    (starting at mean(diag(A)) * 1e-6, growing tenfold per attempt) are
    added to the diagonal until the factorization succeeds or maxtries
    attempts are exhausted.

    :param A: square symmetric matrix to factorize.
    :param maxtries: maximum number of jitter attempts (default 5).
    :returns: lower-triangular L with L L^T = A (+ jitter * I).
    :raises linalg.LinAlgError: if A has a non-positive diagonal element,
        or remains non-positive-definite after all jitter attempts.
    """
    # NOTE(review): the first-attempt prologue is outside the visible hunk;
    # reconstructed from the signature in the hunk header — confirm against
    # the full file.
    try:
        L = linalg.cholesky(A, lower=True)
        return L
    except linalg.LinAlgError:
        diagA = np.diag(A)
        # A positive-definite matrix must have a strictly positive diagonal;
        # in that case no amount of positive jitter can help, so fail fast.
        if np.any(diagA <= 0.):
            raise linalg.LinAlgError("not pd: non-positive diagonal elements")
        jitter = diagA.mean() * 1e-6
        while maxtries > 0 and np.isfinite(jitter):
            print('Warning: adding jitter of {:.10e}'.format(jitter))
            try:
                # np.eye needs no transpose; catch only the factorization
                # error rather than a bare except.
                return linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
            except linalg.LinAlgError:
                jitter *= 10
            finally:
                maxtries -= 1
        raise linalg.LinAlgError("not positive definite, even with jitter.")