diff --git a/GPy/core/model.py b/GPy/core/model.py index f26bf2ee..5e228b15 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -304,54 +304,62 @@ class model(parameterised): return '\n'.join(s) - def checkgrad(self, verbose=False, include_priors=False, step=1e-6, tolerance = 1e-3, return_ratio=False, *args): + def checkgrad(self, verbose=False, include_priors=False, step=1e-6, tolerance = 1e-3): """ Check the gradient of the model by comparing to a numerical estimate. - If the overall gradient fails, invividual components are tested. + If the verbose flag is passed, individual components are tested (and printed) + + :param verbose: If True, print a "full" checking of each parameter + :type verbose: bool + :param step: The size of the step around which to linearise the objective + :type step: float (default 1e-6) + :param tolerance: the tolerance allowed (see note) + :type tolerance: float (default 1e-3) + + Note:- + The gradient is considered correct if the ratio of the analytical + and numerical gradients is within the tolerance of unity. 
""" x = self._get_params_transformed().copy() - #choose a random direction to step in: - dx = step*np.sign(np.random.uniform(-1,1,x.size)) + if not verbose: + #just check the global ratio + dx = step*np.sign(np.random.uniform(-1,1,x.size)) - #evaulate around the point x - self._set_params_transformed(x+dx) - f1,g1 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed() - self._set_params_transformed(x-dx) - f2,g2 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed() - self._set_params_transformed(x) - gradient = self._log_likelihood_gradients_transformed() + #evaulate around the point x + self._set_params_transformed(x+dx) + f1,g1 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed() + self._set_params_transformed(x-dx) + f2,g2 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed() + self._set_params_transformed(x) + gradient = self._log_likelihood_gradients_transformed() - numerical_gradient = (f1-f2)/(2*dx) - global_ratio = (f1-f2)/(2*np.dot(dx,gradient)) - if verbose: - print "Gradient ratio = ", global_ratio, '\n' - sys.stdout.flush() + numerical_gradient = (f1-f2)/(2*dx) + global_ratio = (f1-f2)/(2*np.dot(dx,gradient)) - if (np.abs(1.-global_ratio)