[minor] minor changes

mzwiessele 2014-08-05 08:27:40 -07:00
parent ece85f02fe
commit 93b92111f8
3 changed files with 4 additions and 30 deletions


@@ -349,7 +349,7 @@ class Model(Parameterized):
                 numerical_gradient = (f1 - f2) / (2 * step)
                 if np.all(gradient[xind] == 0): ratio = (f1 - f2) == gradient[xind]
                 else: ratio = (f1 - f2) / (2 * step * gradient[xind])
-                difference = np.abs((f1 - f2) / 2 / step - gradient[xind])
+                difference = np.abs(numerical_gradient - gradient[xind])
                 if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
                     formatted_name = "\033[92m {0} \033[0m".format(names[nind])
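The change above reuses the already-computed numerical_gradient instead of recomputing (f1 - f2) / 2 / step, so the reported difference is guaranteed to agree with the ratio's numerator. A minimal self-contained sketch of the same central-difference check outside GPy (f, grad_f, step, and tolerance here are illustrative choices, not GPy's API):

    import numpy as np

    def check_gradient(f, grad_f, x, step=1e-6, tolerance=1e-4):
        # Compare an analytic gradient against central differences.
        # The numerical gradient is computed once and reused for both
        # the ratio and the difference, as in the hunk above.
        x = np.asarray(x, dtype=float)
        analytic = np.asarray(grad_f(x), dtype=float)
        ok = True
        for i in range(x.size):
            e = np.zeros_like(x); e[i] = step
            f1, f2 = f(x + e), f(x - e)
            numerical = (f1 - f2) / (2 * step)
            difference = np.abs(numerical - analytic[i])
            if analytic[i] == 0:
                ratio = 1.0 if numerical == 0 else 0.0
            else:
                ratio = numerical / analytic[i]
            ok &= (np.abs(1. - ratio) < tolerance) or (difference < tolerance)
        return ok

    # quadratic with a known gradient: passes the check
    assert check_gradient(lambda x: (x**2).sum(), lambda x: 2 * x, np.arange(3.))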


@@ -699,36 +699,10 @@ class OptimizationHandlable(Indexable):
     def _get_params_transformed(self):
         raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
-#         # transformed parameters (apply un-transformation rules)
-#         p = self.param_array.copy()
-#         [np.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
-#         if self.has_parent() and self.constraints[__fixed__].size != 0:
-#             fixes = np.ones(self.size).astype(bool)
-#             fixes[self.constraints[__fixed__]] = FIXED
-#             return p[fixes]
-#         elif self._has_fixes():
-#             return p[self._fixes_]
-#         return p
-#         #
     def _set_params_transformed(self, p):
         raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
-#         """
-#         Set parameters p, but make sure they get transformed before setting.
-#         This means, the optimizer sees p, whereas the model sees transformed(p),
-#         such that, the parameters the model sees are in the right domain.
-#         """
-#         if not(p is self.param_array):
-#             if self.has_parent() and self.constraints[__fixed__].size != 0:
-#                 fixes = np.ones(self.size).astype(bool)
-#                 fixes[self.constraints[__fixed__]] = FIXED
-#                 self.param_array.flat[fixes] = p
-#             elif self._has_fixes(): self.param_array.flat[self._fixes_] = p
-#             else: self.param_array.flat = p
-#             [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
-#              for c, ind in self.constraints.iteritems() if c != __fixed__]
-#             self._trigger_params_changed()
     def _trigger_params_changed(self, trigger_parent=True):
         """
         First tell all children to update,
@@ -736,7 +710,7 @@ class OptimizationHandlable(Indexable):
         If trigger_parent is True, we will tell the parent, otherwise not.
         """
-        [p._trigger_params_changed(trigger_parent=False) for p in self.parameters]
+        [p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
         self.notify_observers(None, None if trigger_parent else -np.inf)
     def _size_transformed(self):
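The is_fixed guard added to _trigger_params_changed stops change notifications from descending into fixed sub-parameters while still notifying this node's own observers. A toy sketch of that children-first notification pattern (the Node class is an illustrative stand-in, not GPy's parameter hierarchy):

    class Node:
        # Children update first, without re-triggering upwards; then this
        # node's observers are notified. Fixed children are skipped
        # entirely, as in the hunk above.
        def __init__(self, name, is_fixed=False):
            self.name, self.is_fixed = name, is_fixed
            self.children, self.observers = [], []

        def trigger_changed(self, trigger_parent=True):
            for c in self.children:
                if not c.is_fixed:
                    c.trigger_changed(trigger_parent=False)
            for notify in self.observers:
                notify(self, trigger_parent)

    root = Node("kern")
    root.children = [Node("variance"), Node("lengthscale", is_fixed=True)]
    for c in root.children:
        c.observers.append(lambda node, _: print(node.name, "updated"))
    root.trigger_changed()  # prints only: variance updated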


@@ -10,7 +10,7 @@ class Add(CombinationKernel):
     """
     Add given list of kernels together.
     propagates gradients through.
     This kernel will take over the active dims of it's subkernels passed in.
     """
     def __init__(self, subkerns, name='add'):
@@ -40,7 +40,7 @@
         return reduce(np.add, (p.Kdiag(X) for p in which_parts))
     def update_gradients_full(self, dL_dK, X, X2=None):
-        [p.update_gradients_full(dL_dK, X, X2) for p in self.parts]
+        [p.update_gradients_full(dL_dK, X, X2) for p in self.parts if not p.is_fixed]
     def update_gradients_diag(self, dL_dK, X):
         [p.update_gradients_diag(dL_dK, X) for p in self.parts]
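This hunk applies the same idea as the parameter-core change: a fixed part's parameters are never optimized, so computing its gradients is wasted work. A self-contained toy of an additive kernel that skips fixed parts during the gradient update (Part and Add here are illustrative stand-ins, not GPy's classes):

    import numpy as np

    class Part:
        # Toy stand-in for a sub-kernel with one variance parameter.
        def __init__(self, variance=1.0, is_fixed=False):
            self.variance, self.is_fixed = variance, is_fixed
            self.variance_gradient = 0.0

        def K(self, X, X2=None):
            X2 = X if X2 is None else X2
            return self.variance * np.ones((X.shape[0], X2.shape[0]))

        def update_gradients_full(self, dL_dK, X, X2=None):
            # dK/dvariance is all-ones here, so dL/dvariance = sum(dL_dK)
            self.variance_gradient = float(dL_dK.sum())

    class Add:
        def __init__(self, parts):
            self.parts = parts

        def K(self, X, X2=None):
            return sum(p.K(X, X2) for p in self.parts)

        def update_gradients_full(self, dL_dK, X, X2=None):
            # mirrors the change above: fixed parts are skipped
            [p.update_gradients_full(dL_dK, X, X2) for p in self.parts if not p.is_fixed]

    X = np.arange(3.)[:, None]
    k = Add([Part(1.0), Part(0.5, is_fixed=True)])
    k.update_gradients_full(np.eye(3), X)
    print([p.variance_gradient for p in k.parts])  # [3.0, 0.0]: fixed part untouched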