diff --git a/GPy/core/model.py b/GPy/core/model.py
index 1595e347..7feb72b2 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -349,7 +349,7 @@ class Model(Parameterized):
             numerical_gradient = (f1 - f2) / (2 * step)
             if np.all(gradient[xind] == 0): ratio = (f1 - f2) == gradient[xind]
             else: ratio = (f1 - f2) / (2 * step * gradient[xind])
-            difference = np.abs((f1 - f2) / 2 / step - gradient[xind])
+            difference = np.abs(numerical_gradient - gradient[xind])
 
             if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
                 formatted_name = "\033[92m {0} \033[0m".format(names[nind])
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index e359409e..2a036378 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -699,36 +699,10 @@ class OptimizationHandlable(Indexable):
     def _get_params_transformed(self):
         raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
-#         # transformed parameters (apply un-transformation rules)
-#         p = self.param_array.copy()
-#         [np.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
-#         if self.has_parent() and self.constraints[__fixed__].size != 0:
-#             fixes = np.ones(self.size).astype(bool)
-#             fixes[self.constraints[__fixed__]] = FIXED
-#             return p[fixes]
-#         elif self._has_fixes():
-#             return p[self._fixes_]
-#         return p
 #
     def _set_params_transformed(self, p):
         raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
-#         """
-#         Set parameters p, but make sure they get transformed before setting.
-#         This means, the optimizer sees p, whereas the model sees transformed(p),
-#         such that, the parameters the model sees are in the right domain.
-#         """
-#         if not(p is self.param_array):
-#             if self.has_parent() and self.constraints[__fixed__].size != 0:
-#                 fixes = np.ones(self.size).astype(bool)
-#                 fixes[self.constraints[__fixed__]] = FIXED
-#                 self.param_array.flat[fixes] = p
-#             elif self._has_fixes(): self.param_array.flat[self._fixes_] = p
-#             else: self.param_array.flat = p
-#         [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
-#          for c, ind in self.constraints.iteritems() if c != __fixed__]
-#         self._trigger_params_changed()
-
     def _trigger_params_changed(self, trigger_parent=True):
         """
         First tell all children to update,
@@ -736,7 +710,7 @@ class OptimizationHandlable(Indexable):
 
         If trigger_parent is True, we will tell the parent, otherwise not.
         """
-        [p._trigger_params_changed(trigger_parent=False) for p in self.parameters]
+        [p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
         self.notify_observers(None, None if trigger_parent else -np.inf)
 
     def _size_transformed(self):
diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py
index 12f5d444..ee743f8b 100644
--- a/GPy/kern/_src/add.py
+++ b/GPy/kern/_src/add.py
@@ -10,7 +10,7 @@ class Add(CombinationKernel):
     """
    Add given list of kernels together.
    propagates gradients through.
-
+    This kernel will take over the active dims of it's subkernels passed in.
 
""" def __init__(self, subkerns, name='add'): @@ -40,7 +40,7 @@ class Add(CombinationKernel): return reduce(np.add, (p.Kdiag(X) for p in which_parts)) def update_gradients_full(self, dL_dK, X, X2=None): - [p.update_gradients_full(dL_dK, X, X2) for p in self.parts] + [p.update_gradients_full(dL_dK, X, X2) for p in self.parts if not p.is_fixed] def update_gradients_diag(self, dL_dK, X): [p.update_gradients_diag(dL_dK, X) for p in self.parts]