simplified the checkgrad logic somewhat

This commit is contained in:
James Hensman 2013-02-04 12:36:08 +00:00
parent 9b69b04933
commit dacdaa1b41

View file

@ -304,15 +304,27 @@ class model(parameterised):
return '\n'.join(s) return '\n'.join(s)
def checkgrad(self, verbose=False, include_priors=False, step=1e-6, tolerance = 1e-3, return_ratio=False, *args): def checkgrad(self, verbose=False, include_priors=False, step=1e-6, tolerance = 1e-3):
""" """
Check the gradient of the model by comparing to a numerical estimate. Check the gradient of the model by comparing to a numerical estimate.
If the overall gradient fails, individual components are tested. If the verbose flag is passed, individual components are tested (and printed)
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:-
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
""" """
x = self._get_params_transformed().copy() x = self._get_params_transformed().copy()
#choose a random direction to step in: if not verbose:
#just check the global ratio
dx = step*np.sign(np.random.uniform(-1,1,x.size)) dx = step*np.sign(np.random.uniform(-1,1,x.size))
#evaluate around the point x #evaluate around the point x
@ -325,17 +337,13 @@ class model(parameterised):
numerical_gradient = (f1-f2)/(2*dx) numerical_gradient = (f1-f2)/(2*dx)
global_ratio = (f1-f2)/(2*np.dot(dx,gradient)) global_ratio = (f1-f2)/(2*np.dot(dx,gradient))
if verbose:
print "Gradient ratio = ", global_ratio, '\n'
sys.stdout.flush()
if (np.abs(1.-global_ratio)<tolerance) and not np.isnan(global_ratio): if (np.abs(1.-global_ratio)<tolerance) and not np.isnan(global_ratio):
if verbose: return True
print 'Gradcheck passed'
else: else:
if verbose: return False
print "Global check failed. Testing individual gradients\n" else:
#check the gradient of each parameter individually, and do some pretty printing
try: try:
names = self._get_param_names_transformed() names = self._get_param_names_transformed()
except NotImplementedError: except NotImplementedError:
@ -369,7 +377,6 @@ class model(parameterised):
ratio = (f1-f2)/(2*step*gradient) ratio = (f1-f2)/(2*step*gradient)
difference = np.abs((f1-f2)/2/step - gradient) difference = np.abs((f1-f2)/2/step - gradient)
if verbose:
if (np.abs(ratio-1)<tolerance): if (np.abs(ratio-1)<tolerance):
formatted_name = "\033[92m {0} \033[0m".format(names[i]) formatted_name = "\033[92m {0} \033[0m".format(names[i])
else: else:
@ -380,13 +387,6 @@ class model(parameterised):
ng = '%.6f' % float(numerical_gradient) ng = '%.6f' % float(numerical_gradient)
grad_string = "{0:^{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}".format(formatted_name,r,d,g, ng, c0 = cols[0]+9, c1 = cols[1], c2 = cols[2], c3 = cols[3], c4 = cols[4]) grad_string = "{0:^{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}".format(formatted_name,r,d,g, ng, c0 = cols[0]+9, c1 = cols[1], c2 = cols[2], c3 = cols[3], c4 = cols[4])
print grad_string print grad_string
if verbose:
print ''
if return_ratio:
return global_ratio
else:
return False
def EPEM(self,epsilon=.1,**kwargs): def EPEM(self,epsilon=.1,**kwargs):
""" """