diff --git a/GPy/core/model.py b/GPy/core/model.py
index 6d90e13a..0990e7f1 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -302,7 +302,7 @@ class Model(Parameterized):
             denominator = (2 * np.dot(dx, gradient))
             global_ratio = (f1 - f2) / np.where(denominator==0., 1e-32, denominator)
-            return np.abs(1. - global_ratio) < tolerance)
+            return np.abs(1. - global_ratio) < tolerance
         else:
             # check the gradient of each parameter individually, and do some pretty printing
             try:
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 23f8e690..d137ceff 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -60,7 +60,7 @@ class SparseGP(GP):
         dL_dKmm = self.grad_dict.pop('dL_dKmm')
         self.kern.update_gradients_full(dL_dKmm, self.Z, None)
         target = self.kern.gradient.copy()
-        self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
+        self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])
         self.kern.gradient += target

         #gradients wrt Z
diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py
index d54b3871..2a35ad3b 100644
--- a/GPy/testing/kernel_tests.py
+++ b/GPy/testing/kernel_tests.py
@@ -252,7 +252,7 @@ class KernelGradientTestsContinuous(unittest.TestCase):
         k.randomize()
         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))

-#TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize
+#TODO: turn off grad checking wrt X for indexed kernels like coregionalize
 # class KernelGradientTestsContinuous1D(unittest.TestCase):
 #     def setUp(self):
 #         self.N, self.D = 100, 1
diff --git a/GPy/util/caching.py b/GPy/util/caching.py
index 792d82e2..ea09292a 100644
--- a/GPy/util/caching.py
+++ b/GPy/util/caching.py
@@ -48,7 +48,7 @@ class Cacher(object):
             if k in kw and kw[k] is not None:
                 return self.operation(*args, **kw)
         # TODO: WARNING !!! Cache OFFSWITCH !!! WARNING
-        return self.operation(*args)
+        #return self.operation(*args)
         #if the result is cached, return the cached computation
         state = [all(a is b for a, b in itertools.izip_longest(args, cached_i)) for cached_i in self.cached_inputs]
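
Note on the `GPy/core/model.py` hunk: the removed trailing parenthesis was a SyntaxError. The surviving expression is the global gradient check in `Model.checkgrad`, which compares a central finite difference against the analytic directional derivative and expects their ratio to be close to 1. A minimal standalone sketch of the same check, assuming illustrative names `f`, `grad_f`, and `x` (not GPy API); the perturbation `dx` is an illustrative choice, the last three lines mirror the patched code:

```python
import numpy as np

def checkgrad_global(f, grad_f, x, step=1e-6, tolerance=1e-3):
    # random +/- step perturbation (illustrative; GPy builds dx from its own step logic)
    dx = step * np.sign(np.random.uniform(-1, 1, x.size))
    f1, f2 = f(x + dx), f(x - dx)      # central difference: f1 - f2 ~ 2 * dx.dot(grad)
    gradient = grad_f(x)               # analytic gradient at x
    denominator = (2 * np.dot(dx, gradient))
    # guard against dividing by zero, exactly as the patched line does
    global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
    return np.abs(1. - global_ratio) < tolerance
```

For a quadratic such as `f = lambda x: (x**2).sum()` with `grad_f = lambda x: 2*x`, the ratio is exactly 1, so the check passes at any reasonable tolerance.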
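
Note on the `GPy/core/sparse_gp.py` hunk: the bare name `grad_dict` does not exist in the method's scope, so every call raised a NameError; the dictionary lives on the instance and must be reached through `self.`. A two-method repro with a hypothetical `Demo` class, purely for illustration:

```python
class Demo(object):
    def __init__(self):
        self.grad_dict = {'dL_dpsi0': 0.0}

    def broken(self):
        return grad_dict['dL_dpsi0']       # NameError: name 'grad_dict' is not defined

    def fixed(self):
        return self.grad_dict['dL_dpsi0']  # attribute lookup via self succeeds
```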
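
Note on the `GPy/util/caching.py` hunk: commenting out the off-switch lets calls fall through to the lookup below it, which matches arguments by object identity (`a is b`) rather than equality. A minimal sketch of that identity-keyed lookup, assuming Python 3 spelling (`itertools.zip_longest`; the original Python 2 code uses `izip_longest`); `IdentityCache` is an illustrative name, not GPy's `Cacher`:

```python
import itertools

class IdentityCache(object):
    """Sketch of an identity-keyed memoizer in the spirit of GPy's Cacher."""
    def __init__(self, operation):
        self.operation = operation
        self.cached_inputs = []   # argument tuples seen so far
        self.cached_outputs = []  # results, parallel to cached_inputs

    def __call__(self, *args):
        # match by object identity, tolerating length mismatches, as in the hunk above
        state = [all(a is b for a, b in itertools.zip_longest(args, cached_i))
                 for cached_i in self.cached_inputs]
        if any(state):
            return self.cached_outputs[state.index(True)]
        result = self.operation(*args)
        self.cached_inputs.append(args)
        self.cached_outputs.append(result)
        return result
```

Identity comparison sidesteps NumPy's elementwise `==`: passing the same array object again is a cache hit, while an equal-valued copy is recomputed.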