diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 7cc39dde..1add8268 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -65,8 +65,6 @@ class GP(Model): self.add_parameter(self.kern) self.add_parameter(self.likelihood) - if self.__class__ is GP: - self.parameters_changed() def parameters_changed(self): self.posterior, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, Y_metadata=self.Y_metadata) diff --git a/GPy/core/model.py b/GPy/core/model.py index bf8915c6..38b5eb29 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -15,6 +15,7 @@ import itertools class Model(Parameterized): _fail_count = 0 # Count of failed optimization steps (see objective) _allowed_failures = 10 # number of allowed failures + def __init__(self, name): super(Model, self).__init__(name) # Parameterized.__init__(self) self.optimization_runs = [] @@ -25,14 +26,8 @@ class Model(Parameterized): raise NotImplementedError, "this needs to be implemented to use the model class" def _log_likelihood_gradients(self): - g = np.zeros(self.size) - try: - [p._collect_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_) if not p.is_fixed] - except ValueError: - raise ValueError, 'Gradient for {} not defined, please specify gradients for parameters to optimize'.format(p.name) - return g - raise NotImplementedError, "this needs to be implemented to use the model class" - + return self.gradient + def _getstate(self): """ Get the current state of the class. @@ -208,8 +203,8 @@ class Model(Parameterized): try: self._set_params_transformed(x) obj_f = -float(self.log_likelihood()) - self.log_prior() - self._fail_count = 0 obj_grads = -self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients()) + self._fail_count = 0 except (LinAlgError, ZeroDivisionError, ValueError) as e: if self._fail_count >= self._allowed_failures: raise e @@ -275,9 +270,8 @@ class Model(Parameterized): The gradient is considered correct if the ratio of the analytical and numerical gradients is within the given tolerance of unity. 
""" - - x = self._get_params_transformed() - + x = self._get_params_transformed().copy() + if not verbose: # make sure only to test the selected parameters if target_param is None: @@ -368,6 +362,7 @@ class Model(Parameterized): ng = '%.6f' % float(numerical_gradient) grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}".format(formatted_name, r, d, g, ng, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4]) print grad_string + self._set_params_transformed(x) return ret diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py index cf353ead..27801e23 100644 --- a/GPy/core/parameterization/array_core.py +++ b/GPy/core/parameterization/array_core.py @@ -49,7 +49,7 @@ class ObservableArray(np.ndarray, Observable): def __setitem__(self, s, val): if self._s_not_empty(s): super(ObservableArray, self).__setitem__(s, val) - self._notify_observers(self[s]) + self.notify_observers(self[s]) def __getslice__(self, start, stop): return self.__getitem__(slice(start, stop)) @@ -65,149 +65,149 @@ class ObservableArray(np.ndarray, Observable): def __ilshift__(self, *args, **kwargs): r = np.ndarray.__ilshift__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __irshift__(self, *args, **kwargs): r = np.ndarray.__irshift__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __ixor__(self, *args, **kwargs): r = np.ndarray.__ixor__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __ipow__(self, *args, **kwargs): r = np.ndarray.__ipow__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __ifloordiv__(self, *args, **kwargs): r = np.ndarray.__ifloordiv__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __isub__(self, *args, **kwargs): r = np.ndarray.__isub__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __ior__(self, *args, **kwargs): r = np.ndarray.__ior__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __itruediv__(self, *args, **kwargs): r = np.ndarray.__itruediv__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __idiv__(self, *args, **kwargs): r = np.ndarray.__idiv__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __iand__(self, *args, **kwargs): r = np.ndarray.__iand__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __imod__(self, *args, **kwargs): r = np.ndarray.__imod__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __iadd__(self, *args, **kwargs): r = np.ndarray.__iadd__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r def __imul__(self, *args, **kwargs): r = np.ndarray.__imul__(self, *args, **kwargs) - self._notify_observers() + self.notify_observers() return r # def __rrshift__(self, *args, **kwargs): # r = np.ndarray.__rrshift__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __ror__(self, *args, **kwargs): # r = np.ndarray.__ror__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rxor__(self, *args, **kwargs): # r = np.ndarray.__rxor__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rdivmod__(self, *args, **kwargs): # r = 
np.ndarray.__rdivmod__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __radd__(self, *args, **kwargs): # r = np.ndarray.__radd__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rdiv__(self, *args, **kwargs): # r = np.ndarray.__rdiv__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rtruediv__(self, *args, **kwargs): # r = np.ndarray.__rtruediv__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rshift__(self, *args, **kwargs): # r = np.ndarray.__rshift__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rmul__(self, *args, **kwargs): # r = np.ndarray.__rmul__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rpow__(self, *args, **kwargs): # r = np.ndarray.__rpow__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rsub__(self, *args, **kwargs): # r = np.ndarray.__rsub__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r # def __rfloordiv__(self, *args, **kwargs): # r = np.ndarray.__rfloordiv__(self, *args, **kwargs) -# self._notify_observers() +# self.notify_observers() # return r diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index a9f3768e..f8f6ab5b 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -62,6 +62,7 @@ class ParameterIndexOperations(object): def clear(self): self._properties.clear() + @property def size(self): return reduce(lambda a,b: a+b.size, self.iterindices(), 0) @@ -165,7 +166,7 @@ class ParameterIndexOperationsView(object): for i, ind in self.items(): self._param_index_ops.remove(i, ind+self._offset) - + @property def size(self): return reduce(lambda a,b: a+b.size, self.iterindices(), 0) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 8f4dbaf3..2917cad7 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -54,7 +54,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable): obj._tied_to_me_ = SetDict() obj._tied_to_ = [] obj._original_ = True - obj._gradient_ = None + obj._gradient_array_ = numpy.zeros(obj.shape, dtype=numpy.float64) return obj def __init__(self, name, input_array, default_constraint=None, *a, **kw): @@ -77,7 +77,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable): # see InfoArray.__array_finalize__ for comments if obj is None: return super(Param, self).__array_finalize__(obj) - self._direct_parent_ = getattr(obj, '_direct_parent_', None) + self._parent_ = getattr(obj, '_parent_', None) self._parent_index_ = getattr(obj, '_parent_index_', None) self._default_constraint_ = getattr(obj, '_default_constraint_', None) self._current_slice_ = getattr(obj, '_current_slice_', None) @@ -89,16 +89,18 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable): self._updated_ = getattr(obj, '_updated_', None) self._original_ = getattr(obj, '_original_', None) self._name = getattr(obj, 'name', None) - self._gradient_ = getattr(obj, '_gradient_', None) + self._gradient_array_ = getattr(obj, '_gradient_array_', None) self.constraints = getattr(obj, 'constraints', None) self.priors = getattr(obj, 'priors', None) - + @property + def _param_array_(self): + return 
self + @property def gradient(self): - if self._gradient_ is None: - self._gradient_ = numpy.zeros(self._realshape_) - return self._gradient_[self._current_slice_] + return self._gradient_array_[self._current_slice_] + @gradient.setter def gradient(self, val): self.gradient[:] = val @@ -110,7 +112,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable): func, args, state = super(Param, self).__reduce__() return func, args, (state, (self.name, - self._direct_parent_, + self._parent_, self._parent_index_, self._default_constraint_, self._current_slice_, @@ -135,7 +137,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable): self._current_slice_ = state.pop() self._default_constraint_ = state.pop() self._parent_index_ = state.pop() - self._direct_parent_ = state.pop() + self._parent_ = state.pop() self.name = state.pop() def copy(self, *args): @@ -148,20 +150,20 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable): #=========================================================================== # get/set parameters #=========================================================================== - def _set_params(self, param, trigger_parent=True): - self.flat = param - if trigger_parent: min_priority = None - else: min_priority = -numpy.inf - self._notify_observers(None, min_priority) - - def _get_params(self): - return self.flat - - def _collect_gradient(self, target): - target += self.gradient.flat - - def _set_gradient(self, g): - self.gradient = g.reshape(self._realshape_) +# def _set_params(self, param, trigger_parent=True): +# self.flat = param +# if trigger_parent: min_priority = None +# else: min_priority = -numpy.inf +# self.notify_observers(None, min_priority) +# +# def _get_params(self): +# return self.flat +# +# def _collect_gradient(self, target): +# target += self.gradient.flat +# +# def _set_gradient(self, g): +# self.gradient = g.reshape(self._realshape_) #=========================================================================== # Array operations -> done @@ -362,7 +364,7 @@ class ParamConcatenation(object): parents = dict() for p in self.params: if p.has_parent(): - parent = p._direct_parent_ + parent = p._parent_ level = 0 while parent is not None: if parent in parents: @@ -370,7 +372,7 @@ class ParamConcatenation(object): else: parents[parent] = level level += 1 - parent = parent._direct_parent_ + parent = parent._parent_ import operator self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1))) #=========================================================================== @@ -397,7 +399,7 @@ class ParamConcatenation(object): #=========================================================================== def update_all_params(self): for par in self.parents: - par._notify_observers(-numpy.inf) + par.notify_observers(-numpy.inf) def constrain(self, constraint, warning=True): [param.constrain(constraint, trigger_parent=False) for param in self.params] diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 109df6e9..c9372b58 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -1,26 +1,51 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) +""" +Core module for parameterization. +This module implements all parameterization techniques, split up in modular bits. 
+ +HierarchyError: +raised when an error with the hierarchy occurs (cycles etc.) + +Observable: +Observable pattern for parameterization + + +""" from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np +import itertools __updated__ = '2013-12-16' class HierarchyError(Exception): """ - Gets thrown when something is wrong with the parameter hierarchy + Gets thrown when something is wrong with the parameter hierarchy. """ def adjust_name_for_printing(name): + """ + Make sure a name can be printed and can also be used as a variable name. + """ if name is not None: - return name.replace(" ", "_").replace(".", "_").replace("-", "").replace("+", "").replace("!", "").replace("*", "").replace("/", "") + return name.replace(" ", "_").replace(".", "_").replace("-", "_m_").replace("+", "_p_").replace("!", "_I_").replace("**", "_xx_").replace("*", "_x_").replace("/", "_l_").replace("@",'_at_') return '' class Observable(object): + """ + Observable pattern for parameterization. + + This object allows observers to register themselves with a (bound!) callable. + Every time the observable changes, it sends a notification with + self as the only argument to all its observers. + """ _updated = True def __init__(self, *args, **kwargs): self._observer_callables_ = [] - + def __del__(self, *args, **kwargs): + del self._observer_callables_ + def add_observer(self, observer, callble, priority=0): self._insert_sorted(priority, observer, callble) @@ -35,8 +60,8 @@ to_remove.append((p, obs, clble)) for r in to_remove: self._observer_callables_.remove(r) - - def _notify_observers(self, which=None, min_priority=None): + + def notify_observers(self, which=None, min_priority=None): """ Notifies all observers. 'which' is the element that kicked off this notification loop. @@ -67,6 +92,41 @@ self._observer_callables_.insert(ins, (p, o, c)) class Pickleable(object): + """ + Make an object pickleable (see the python doc on 'pickling'). + + This class provides pickling support via the memento pattern. + _getstate returns a memento of the class, which gets pickled. + _setstate() (re-)sets the state of the class to the memento. + """ + #=========================================================================== + # Pickling operations + #=========================================================================== + def pickle(self, f, protocol=-1): + """ + :param f: either filename or open file object to write to. + if it is an open buffer, you have to make sure to close + it properly. + :param protocol: pickling protocol to use, see the python pickle documentation for details. + """ + import cPickle + if isinstance(f, str): + with open(f, 'w') as f: + cPickle.dump(self, f, protocol) + else: + cPickle.dump(self, f, protocol) + def __getstate__(self): + if self._has_get_set_state(): + return self._getstate() + return self.__dict__ + def __setstate__(self, state): + if self._has_get_set_state(): + self._setstate(state) + # TODO: maybe parameters_changed() here? + return + self.__dict__ = state + def _has_get_set_state(self): + return '_getstate' in vars(self.__class__) and '_setstate' in vars(self.__class__) def _getstate(self): """ Returns the state of this class in a memento pattern. @@ -93,70 +153,145 @@ #=============================================================================== class Parentable(object): - _direct_parent_ = None + """ + Enable an Object to have a parent. 
+ + Additionally this adds the parent_index, which is the index for the parent + to look for in its parameter list. + """ _parent_ = None _parent_index_ = None def has_parent(self): - return self._direct_parent_ is not None - - def _notify_parent_change(self): - for p in self._parameters_: - p._parent_changed(self) + """ + Return whether this parentable object currently has a parent. + """ + return self._parent_ is not None def _parent_changed(self): + """ + Gets called when the parent changes, so we can adjust our + inner attributes according to the new parent. + """ raise NotImplementedError, "shouldn't happen, Parentable objects need to be able to change their parent" + def _disconnect_parent(self, *args, **kw): + """ + Disconnect this object from its parent + """ + raise NotImplementedError, "Abstract superclass" + @property def _highest_parent_(self): - if self._direct_parent_ is None: + """ + Gets the highest parent by traversing up to the root node of the hierarchy. + """ + if self._parent_ is None: return self - return self._direct_parent_._highest_parent_ + return self._parent_._highest_parent_ - def _notify_parameters_changed(self): - raise NotImplementedError, "shouldn't happen, abstract superclass" - + def _notify_parent_change(self): + """ + Don't do anything if we are a leaf node + """ + pass class Nameable(Parentable): + """ + Make an object nameable inside the hierarchy. + """ def __init__(self, name, *a, **kw): super(Nameable, self).__init__(*a, **kw) self._name = name or self.__class__.__name__ @property def name(self): + """ + The name of this object. + """ return self._name @name.setter def name(self, name): + """ + Set the name of this object. + Tell the parent if the name has changed. + """ from_name = self.name assert isinstance(name, str) self._name = name if self.has_parent(): - self._direct_parent_._name_changed(self, from_name) + self._parent_._name_changed(self, from_name) def hierarchy_name(self, adjust_for_printing=True): + """ + Return the name of this object with its parents' names attached by dots. + + :param bool adjust_for_printing: whether to call :func:`~adjust_name_for_printing()` + on the names, recursively + """ if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x) else: adjust = lambda x: x if self.has_parent(): - return self._direct_parent_.hierarchy_name() + "." + adjust(self.name) + return self._parent_.hierarchy_name() + "." + adjust(self.name) return adjust(self.name) class Gradcheckable(Parentable): + """ + Adds the functionality for an object to be gradcheckable. + It is just a thin wrapper of a call to the highest parent for now. + TODO: Can be done better, by only changing parameters of the current parameter handle, + such that the object hierarchy only has to change for those. + """ def __init__(self, *a, **kw): super(Gradcheckable, self).__init__(*a, **kw) def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3): + """ + Check the gradient of this parameter with respect to the highest parent's + objective function. + This is a three point estimate of the gradient, perturbing the parameters + with a stepsize of step. + The check passes if either the ratio or the difference between numerical and + analytical gradient is smaller than tolerance. + + :param bool verbose: whether each parameter shall be checked individually. + :param float step: the stepsize for the numerical three point gradient estimate. + :param float tolerance: the tolerance for the gradient ratio or difference. 
+ """ if self.has_parent(): return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance) return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance) def _checkgrad(self, param): + """ + Perform the checkgrad on the model. + TODO: this can be done more efficiently, when doing it inside here + """ raise NotImplementedError, "Need log likelihood to check gradient against" class Indexable(object): + """ + Enable enraveled indexes and offsets for this object. + The raveled index of an object is the index for its parameters in a flattened int array. + """ def _raveled_index(self): + """ + Flattened array of ints, specifying the index of this object. + This has to account for shaped parameters! + """ raise NotImplementedError, "Need to be able to get the raveled Index" def _internal_offset(self): + """ + The offset for this parameter inside its parent. + This has to account for shaped parameters! + """ return 0 def _offset_for(self, param): + """ + Return the offset of the param inside this parameterized object. + This does not need to account for shaped parameters, as it + basically just sums up the parameter sizes which come before param. + """ raise NotImplementedError, "shouldnt happen, offset required from non parameterization object?" def _raveled_index_for(self, param): @@ -169,6 +304,15 @@ class Indexable(object): class Constrainable(Nameable, Indexable): + """ + Make an object constrainable with Priors and Transformations. + TODO: Mappings!! + Adding a constraint to a Parameter means to tell the highest parent that + the constraint was added and making sure that all parameters covered + by this object are indeed conforming to the constraint. + + :func:`constrain()` and :func:`unconstrain()` are main methods here + """ def __init__(self, name, default_constraint=None, *a, **kw): super(Constrainable, self).__init__(name=name, *a, **kw) self._default_constraint_ = default_constraint @@ -178,12 +322,16 @@ class Constrainable(Nameable, Indexable): if self._default_constraint_ is not None: self.constrain(self._default_constraint_) - def _disconnect_parent(self, constr=None): + def _disconnect_parent(self, constr=None, *args, **kw): + """ + From Parentable: + disconnect the parent and set the new constraints to constr + """ if constr is None: constr = self.constraints.copy() self.constraints.clear() self.constraints = constr - self._direct_parent_ = None + self._parent_ = None self._parent_index_ = None self._connect_fixes() self._notify_parent_change() @@ -193,7 +341,7 @@ class Constrainable(Nameable, Indexable): #=========================================================================== def constrain_fixed(self, value=None, warning=True, trigger_parent=True): """ - Constrain this paramter to be fixed to the current value it carries. + Constrain this parameter to be fixed to the current value it carries. :param warning: print a warning for overwriting constraints. """ @@ -237,11 +385,20 @@ class Constrainable(Nameable, Indexable): #=========================================================================== # Prior Operations #=========================================================================== - def set_prior(self, prior, warning=True, trigger_parent=True): + def set_prior(self, prior, warning=True): + """ + Set the prior for this object to prior. 
:param :class:`~GPy.priors.Prior` prior: a prior to set for this parameter + :param bool warning: whether to warn if another prior was set for this parameter + """ repriorized = self.unset_priors() self._add_to_index_operations(self.priors, repriorized, prior, warning) def unset_priors(self, *priors): + """ + Un-set all given priors from this parameter handle. + + """ return self._remove_from_index_operations(self.priors, priors) def log_prior(self): @@ -274,7 +431,7 @@ :py:class:`GPy.core.transformations.Transformation`. """ if isinstance(transform, Transformation): - self._set_params(transform.initialize(self._get_params()), trigger_parent=trigger_parent) + self._param_array_[:] = transform.initialize(self._param_array_) reconstrained = self.unconstrain() self._add_to_index_operations(self.constraints, reconstrained, transform, warning) @@ -333,6 +490,10 @@ self.unconstrain(Logistic(lower, upper)) def _parent_changed(self, parent): + """ + From Parentable: + Called when the parent changed + """ from index_operations import ParameterIndexOperationsView self.constraints = ParameterIndexOperationsView(parent.constraints, parent._offset_for(self), self.size) self.priors = ParameterIndexOperationsView(parent.priors, parent._offset_for(self), self.size) @@ -340,14 +501,25 @@ for p in self._parameters_: p._parent_changed(parent) - def _add_to_index_operations(self, which, reconstrained, transform, warning): + def _add_to_index_operations(self, which, reconstrained, what, warning): + """ + Helper preventing code duplication. + This adds the given what (transformation, prior etc.) to the parameter index operations which. + reconstrained are the reconstrained indices. + Warn when reconstraining parameters if warning is True. + TODO: find out which parameters have changed specifically + """ if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those print "WARNING: reconstraining parameters {}".format(self.parameter_names() or self.name) - which.add(transform, self._raveled_index()) + which.add(what, self._raveled_index()) - def _remove_from_index_operations(self, which, transforms): - if len(transforms) == 0: + def _remove_from_index_operations(self, which, what): + """ + Helper preventing code duplication. + Remove the given what (transform, prior etc.) from the param index operations which. + """ + if len(what) == 0: transforms = which.properties() + else: transforms = what removed = np.empty((0,), dtype=int) for t in transforms: @@ -359,36 +531,65 @@ return removed class OptimizationHandlable(Constrainable, Observable): + """ + This enables optimization handles on an Object as done in GPy 0.4. 
+ + transformed: make sure the transformations and constraints etc are handled + """ + def transform(self): + [np.put(self._param_array_, ind, c.finv(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] + + def untransform(self): + [np.put(self._param_array_, ind, c.f(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] + def _get_params_transformed(self): # transformed parameters (apply transformation rules) - p = self._get_params() + p = self._param_array_.copy() [np.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] if self._has_fixes(): return p[self._fixes_] return p - + def _set_params_transformed(self, p): - # inverse apply transformations for parameters and set the resulting parameters - self._set_params(self._untransform_params(p)) + if self._has_fixes(): self._param_array_[self._fixes_] = p.copy() + else: self._param_array_[:] = p.copy() + self.untransform() + self._trigger_params_changed() + + def _trigger_params_changed(self, trigger_parent=True): + [p._trigger_params_changed(trigger_parent=False) for p in self._parameters_] + if trigger_parent: min_priority = None + else: min_priority = -np.inf + self.notify_observers(None, min_priority) def _size_transformed(self): return self.size - self.constraints[__fixed__].size - - def _untransform_params(self, p): - p = p.copy() - if self._has_fixes(): tmp = self._get_params(); tmp[self._fixes_] = p; p = tmp; del tmp - [np.put(p, ind, c.f(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] - return p - - def _get_params(self): +# +# def _untransform_params(self, p): +# # inverse apply transformations for parameters +# #p = p.copy() +# if self._has_fixes(): tmp = self._get_params(); tmp[self._fixes_] = p; p = tmp; del tmp +# [np.put(p, ind, c.f(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] +# return p +# +# def _get_params(self): +# """ +# get all parameters +# """ +# return self._param_array_ +# p = np.empty(self.size, dtype=np.float64) +# if self.size == 0: +# return p +# [np.put(p, ind, par._get_params()) for ind, par in itertools.izip(self._param)] +# return p + +# def _set_params(self, params, trigger_parent=True): +# self._param_array_.flat = params +# if trigger_parent: min_priority = None +# else: min_priority = -np.inf +# self.notify_observers(None, min_priority) # don't overwrite this anymore! - if not self.size: - return np.empty(shape=(0,), dtype=np.float64) - return np.hstack([x._get_params() for x in self._parameters_ if x.size > 0]) - - def _set_params(self, params, trigger_parent=True): - # don't overwrite this anymore! 
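The in-place transform/untransform pair above is the core of the new parameter handling: _get_params_transformed hands the optimizer an unconstrained copy of _param_array_ by pushing it through each constraint's finv, and _set_params_transformed writes the optimizer's values back and maps them into model space with f before notifying observers. Below is a minimal, self-contained sketch of that round trip; ToyLogexp is a hypothetical stand-in for GPy's Logexp, without the clipping the real transformation uses for numerical safety.

import numpy as np

class ToyLogexp(object):
    """Positivity constraint: f maps optimizer space onto (0, inf); finv inverts it."""
    def f(self, x):
        return np.log(1. + np.exp(x))
    def finv(self, y):
        return np.log(np.exp(y) - 1.)

c = ToyLogexp()
param_array = np.array([0.5, 2.0, 10.0])  # model space, all positive
opt_view = c.finv(param_array)            # what the optimizer sees (unbounded)
param_array[:] = c.f(opt_view)            # written back in place, as untransform does
print(np.allclose(param_array, [0.5, 2.0, 10.0]))  # True: lossless round trip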
- raise NotImplementedError, "This needs to be implemented in Param and Parametrizable" + #raise NotImplementedError, "Abstract superclass: This needs to be implemented in Param and Parameterizable" #=========================================================================== # Optimization handles: @@ -396,6 +597,7 @@ class OptimizationHandlable(Constrainable, Observable): def _get_param_names(self): n = np.array([p.hierarchy_name() + '[' + str(i) + ']' for p in self.flattened_parameters for i in p._indices()]) return n + def _get_param_names_transformed(self): n = self._get_param_names() if self._has_fixes(): @@ -405,19 +607,16 @@ class OptimizationHandlable(Constrainable, Observable): #=========================================================================== # Randomizeable #=========================================================================== - def randomize(self): + def randomize(self, rand_gen=np.random.normal, loc=0, scale=1, *args, **kwargs): """ Randomize the model. - Make this draw from the prior if one exists, else draw from N(0,1) + Make this draw from the prior if one exists, else draw from given random generator """ # first take care of all parameters (from N(0,1)) - # x = self._get_params_transformed() - x = np.random.randn(self._size_transformed()) - x = self._untransform_params(x) + x = rand_gen(loc=loc, scale=scale, size=self._size_transformed(), *args, **kwargs) # now draw from prior where possible [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None] - self._set_params(x) - # self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...) + self._set_params_transformed(x) # makes sure all of the tied parameters get the same init (since there's only one prior object...) class Parameterizable(OptimizationHandlable): def __init__(self, *args, **kwargs): @@ -427,6 +626,13 @@ class Parameterizable(OptimizationHandlable): self._added_names_ = set() def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True): + """ + Get the names of all parameters of this model. 
+ + :param bool add_self: whether to add the own name in front of names + :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names + :param bool recursive: whether to traverse through hierarchy and append leaf node names + """ if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x) else: adjust = lambda x: x if recursive: names = [xi for x in self._parameters_ for xi in x.parameter_names(add_self=True, adjust_for_printing=adjust_for_printing)] @@ -438,8 +644,11 @@ class Parameterizable(OptimizationHandlable): def num_params(self): return len(self._parameters_) - def _add_parameter_name(self, param): + def _add_parameter_name(self, param, ignore_added_names=False): pname = adjust_name_for_printing(param.name) + if ignore_added_names: + self.__dict__[pname] = param + return # and makes sure to not delete programmatically added parameters if pname in self.__dict__: if not (param is self.__dict__[pname]): @@ -461,28 +670,42 @@ class Parameterizable(OptimizationHandlable): def _name_changed(self, param, old_name): self._remove_parameter_name(None, old_name) self._add_parameter_name(param) - - def _collect_gradient(self, target): - import itertools - [p._collect_gradient(target[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)] + + #========================================================================= + # Gradient handling + #========================================================================= + @property + def gradient(self): + return self._gradient_array_ + + @gradient.setter + def gradient(self, val): + self._gradient_array_[:] = val + #=========================================================================== + # def _collect_gradient(self, target): + # [p._collect_gradient(target[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)] + #=========================================================================== - def _set_params(self, params, trigger_parent=True): - import itertools - [p._set_params(params[s], trigger_parent=False) for p, s in itertools.izip(self._parameters_, self._param_slices_)] - if trigger_parent: min_priority = None - else: min_priority = -np.inf - self._notify_observers(None, min_priority) + #=========================================================================== + # def _set_params(self, params, trigger_parent=True): + # [p._set_params(params[s], trigger_parent=False) for p, s in itertools.izip(self._parameters_, self._param_slices_)] + # if trigger_parent: min_priority = None + # else: min_priority = -np.inf + # self.notify_observers(None, min_priority) + #=========================================================================== - def _set_gradient(self, g): - import itertools - [p._set_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)] + #=========================================================================== + # def _set_gradient(self, g): + # [p._set_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)] + #=========================================================================== - def add_parameter(self, param, index=None): + def add_parameter(self, param, index=None, _ignore_added_names=False): """ :param parameters: the parameters to add :type parameters: list of or one :py:class:`GPy.core.param.Param` :param [index]: index of where to put parameters - + + :param bool _ignore_added_names: whether the name of the parameter overrides a possibly existing field Add all parameters to this param class, 
you can insert parameters at any given index using the :func:`list.insert` syntax @@ -494,12 +717,12 @@ class Parameterizable(OptimizationHandlable): self.add_parameter(param, index) elif param not in self._parameters_: if param.has_parent(): - parent = param._direct_parent_ + parent = param._parent_ while parent is not None: if parent is self: - raise HierarchyError, "You cannot add a parameter twice into the hirarchy" - parent = parent._direct_parent_ - param._direct_parent_.remove_parameter(param) + raise HierarchyError, "You cannot add a parameter twice into the hierarchy" + parent = parent._parent_ + param._parent_.remove_parameter(param) # make sure the size is set if index is None: self.constraints.update(param.constraints, self.size) @@ -517,7 +740,7 @@ class Parameterizable(OptimizationHandlable): self.size += param.size - self._connect_parameters() + self._connect_parameters(ignore_added_names=_ignore_added_names) self._notify_parent_change() self._connect_fixes() else: @@ -551,14 +774,14 @@ class Parameterizable(OptimizationHandlable): self._connect_parameters() self._notify_parent_change() - parent = self._direct_parent_ + parent = self._parent_ while parent is not None: parent._connect_fixes() parent._connect_parameters() parent._notify_parent_change() - parent = parent._direct_parent_ + parent = parent._parent_ - def _connect_parameters(self): + def _connect_parameters(self, ignore_added_names=False): # connect parameterlist to this parameterized object # This just sets up the right connection for the params objects # to be used as parameters @@ -567,14 +790,35 @@ class Parameterizable(OptimizationHandlable): if not hasattr(self, "_parameters_") or len(self._parameters_) < 1: # no parameters for this class return - sizes = [0] + old_size = 0 + self._param_array_ = np.empty(self.size, dtype=np.float64) + self._gradient_array_ = np.empty(self.size, dtype=np.float64) + self._param_slices_ = [] + for i, p in enumerate(self._parameters_): - p._direct_parent_ = self + p._parent_ = self p._parent_index_ = i - sizes.append(p.size + sizes[-1]) - self._param_slices_.append(slice(sizes[-2], sizes[-1])) - self._add_parameter_name(p) + + pslice = slice(old_size, old_size+p.size) + pi_old_size = old_size + for pi in p.flattened_parameters: + pislice = slice(pi_old_size, pi_old_size+pi.size) + + self._param_array_[pislice] = pi._param_array_.flat + self._gradient_array_[pislice] = pi._gradient_array_.flat + + pi._param_array_.data = self._param_array_[pislice].data + pi._gradient_array_.data = self._gradient_array_[pislice].data + + pi_old_size += pi.size + + p._param_array_.data = self._param_array_[pslice].data + p._gradient_array_.data = self._gradient_array_[pslice].data + + self._param_slices_.append(pslice) + self._add_parameter_name(p, ignore_added_names=ignore_added_names) + old_size += p.size #=========================================================================== # notification system @@ -582,7 +826,7 @@ class Parameterizable(OptimizationHandlable): def _parameters_changed_notification(self, which): self.parameters_changed() def _pass_through_notify_observers(self, which): - self._notify_observers(which) + self.notify_observers(which) #=========================================================================== # TODO: not working yet @@ -595,7 +839,7 @@ class Parameterizable(OptimizationHandlable): dc = dict() for k, v in self.__dict__.iteritems(): - if k not in ['_direct_parent_', '_parameters_', '_parent_index_', '_observer_callables_'] + self.parameter_names(): + if k not 
in ['_parent_', '_parameters_', '_parent_index_', '_observer_callables_'] + self.parameter_names(recursive=False): if isinstance(v, (Constrainable, ParameterIndexOperations, ParameterIndexOperationsView)): dc[k] = v.copy() else: @@ -603,7 +847,7 @@ class Parameterizable(OptimizationHandlable): if k == '_parameters_': params = [p.copy() for p in v] - dc['_direct_parent_'] = None + dc['_parent_'] = None dc['_parent_index_'] = None dc['_observer_callables_'] = [] dc['_parameters_'] = ArrayList() @@ -615,11 +859,20 @@ class Parameterizable(OptimizationHandlable): s.__dict__ = dc for p in params: - import ipdb;ipdb.set_trace() - s.add_parameter(p) + s.add_parameter(p, _ignore_added_names=True) return s - + + #=========================================================================== + # From being parentable, we have to define the parent_change notification + #=========================================================================== + def _notify_parent_change(self): + """ + Notify all parameters that the parent has changed + """ + for p in self._parameters_: + p._parent_changed(self) + def parameters_changed(self): """ This method gets called when parameters have changed. diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 3ef99a35..50dd7b3b 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -11,6 +11,12 @@ from parameter_core import Pickleable, Parameterizable, adjust_name_for_printing from transformations import __fixed__ from lists_and_dicts import ArrayList +class ParametersChangedMeta(type): + def __call__(self, *args, **kw): + instance = super(ParametersChangedMeta, self).__call__(*args, **kw) + instance.parameters_changed() + return instance + class Parameterized(Parameterizable, Pickleable, Gradcheckable): """ Parameterized class @@ -53,6 +59,12 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable): If you want to operate on all parameters use m[''] to wildcard select all paramters and concatenate them. Printing m[''] will result in printing of all parameters in detail. """ + #=========================================================================== + # Metaclass for parameters changed after init. + # This makes sure, that parameters changed will always be called after __init__ + # **Never** call parameters_changed() yourself + __metaclass__ = ParametersChangedMeta + #=========================================================================== def __init__(self, name=None, *a, **kw): super(Parameterized, self).__init__(name=name, parent=None, parent_index=None, *a, **kw) self._in_init_ = True @@ -88,39 +100,7 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable): return G return node - #=========================================================================== - # Pickling operations - #=========================================================================== - def pickle(self, f, protocol=-1): - """ - :param f: either filename or open file object to write to. - if it is an open buffer, you have to make sure to close - it properly. - :param protocol: pickling protocol to use, python-pickle for details. 
- """ - if isinstance(f, str): - with open(f, 'w') as f: - cPickle.dump(self, f, protocol) - else: - cPickle.dump(self, f, protocol) - - def copy(self): - c = super(Parameterized, self).copy() - c.add_observer(c, c._parameters_changed_notification, -100) - return c - def __getstate__(self): - if self._has_get_set_state(): - return self._getstate() - return self.__dict__ - def __setstate__(self, state): - if self._has_get_set_state(): - self._setstate(state) # set state - # self._set_params(self._get_params()) # restore all values - return - self.__dict__ = state - def _has_get_set_state(self): - return '_getstate' in vars(self.__class__) and '_setstate' in vars(self.__class__) def _getstate(self): """ Get the current state of the class, @@ -149,25 +129,33 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable): self._connect_parameters() self.parameters_changed() #=========================================================================== + # Override copy to handle programmatically added observers + #=========================================================================== + def copy(self): + c = super(Pickleable, self).copy() + c.add_observer(c, c._parameters_changed_notification, -100) + return c + + #=========================================================================== # Gradient control #=========================================================================== def _transform_gradients(self, g): if self.has_parent(): return g - x = self._get_params() - [numpy.put(g, i, g[i] * c.gradfactor(x[i])) for c, i in self.constraints.iteritems() if c != __fixed__] - for p in self.flattened_parameters: - for t, i in p._tied_to_me_.iteritems(): - g[self._offset_for(p) + numpy.array(list(i))] += g[self._raveled_index_for(t)] + [numpy.put(g, i, g[i] * c.gradfactor(self._param_array_[i])) for c, i in self.constraints.iteritems() if c != __fixed__] if self._has_fixes(): return g[self._fixes_] return g + + #=========================================================================== + # Indexable + #=========================================================================== def _offset_for(self, param): # get the offset in the parameterized index array for param if param.has_parent(): - if param._direct_parent_._get_original(param) in self._parameters_: - return self._param_slices_[param._direct_parent_._get_original(param)._parent_index_].start - return self._offset_for(param._direct_parent_) + param._direct_parent_._offset_for(param) + if param._parent_._get_original(param) in self._parameters_: + return self._param_slices_[param._parent_._get_original(param)._parent_index_].start + return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0 def _raveled_index_for(self, param): @@ -229,8 +217,8 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable): return ParamConcatenation(paramlist) def __setitem__(self, name, value, paramlist=None): - if isinstance(name, slice): - self[''][name] = value + if isinstance(name, (slice, tuple, np.ndarray)): + self._param_array_[name] = value else: try: param = self.__getitem__(name, paramlist) except AttributeError as a: raise a diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py index 60fcc469..5cda8d46 100644 --- a/GPy/core/parameterization/transformations.py +++ b/GPy/core/parameterization/transformations.py @@ -10,7 +10,7 @@ import sys #_lim_val = -np.log(sys.float_info.epsilon) _exp_lim_val = np.finfo(np.float64).max -_lim_val = np.log(_exp_lim_val)# 
+_lim_val = np.log(_exp_lim_val) #=============================================================================== # Fixing constants @@ -57,7 +57,7 @@ class Logexp(Transformation): return np.where(x>_lim_val, x, np.log(1. + np.exp(np.clip(x, -_lim_val, _lim_val)))) #raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x))) def finv(self, f): - return np.where(f>_lim_val, f, np.log(np.exp(f) - 1.)) + return np.where(f>_lim_val, f, np.log(np.exp(f+1e-20) - 1.)) def gradfactor(self, f): return np.where(f>_lim_val, 1., 1 - np.exp(-f)) def initialize(self, f): diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 4230d8ba..f4f34a5e 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -48,7 +48,6 @@ class SparseGP(GP): GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name) self.add_parameter(self.Z, index=0) - self.parameters_changed() def has_uncertain_inputs(self): return isinstance(self.X, VariationalPosterior) @@ -60,11 +59,9 @@ class SparseGP(GP): #gradients wrt kernel dL_dKmm = self.grad_dict.pop('dL_dKmm') self.kern.update_gradients_full(dL_dKmm, self.Z, None) - target = np.zeros(self.kern.size) - self.kern._collect_gradient(target) + target = self.kern.gradient.copy() self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict) - self.kern._collect_gradient(target) - self.kern._set_gradient(target) + self.kern.gradient += target #gradients wrt Z self.Z.gradient = self.kern.gradients_X(dL_dKmm, self.Z) @@ -72,14 +69,12 @@ class SparseGP(GP): self.grad_dict['dL_dpsi1'], self.grad_dict['dL_dpsi2'], Z=self.Z, variational_posterior=self.X) else: #gradients wrt kernel - target = np.zeros(self.kern.size) self.kern.update_gradients_diag(self.grad_dict['dL_dKdiag'], self.X) - self.kern._collect_gradient(target) + target = self.kern.gradient.copy() self.kern.update_gradients_full(self.grad_dict['dL_dKnm'], self.X, self.Z) - self.kern._collect_gradient(target) + target += self.kern.gradient self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z, None) - self.kern._collect_gradient(target) - self.kern._set_gradient(target) + self.kern.gradient += target #gradients wrt Z self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z) diff --git a/GPy/examples/__init__.py b/GPy/examples/__init__.py index 2f74858a..87548553 100644 --- a/GPy/examples/__init__.py +++ b/GPy/examples/__init__.py @@ -6,3 +6,4 @@ import regression import dimensionality_reduction import tutorials import stochastic +import non_gaussian \ No newline at end of file diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index 64707298..1d998fcb 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -19,12 +19,16 @@ class VarDTC(object): """ const_jitter = 1e-6 - def __init__(self): + def __init__(self, limit=1): #self._YYTfactor_cache = caching.cache() from ...util.caching import Cacher - self.get_trYYT = Cacher(self._get_trYYT, 1) - self.get_YYTfactor = Cacher(self._get_YYTfactor, 1) + self.get_trYYT = Cacher(self._get_trYYT, limit) + self.get_YYTfactor = Cacher(self._get_YYTfactor, limit) + def set_limit(self, limit): + self.get_trYYT.limit = limit + self.get_YYTfactor.limit = limit + def _get_trYYT(self, Y): return param_to_array(np.sum(np.square(Y))) @@ -175,11 +179,14 @@ class VarDTC(object): return post, log_marginal, grad_dict class 
VarDTCMissingData(object): - def __init__(self): + def __init__(self, limit=1): from ...util.caching import Cacher - self._Y = Cacher(self._subarray_computations, 1) + self._Y = Cacher(self._subarray_computations, limit) pass + def set_limit(self, limit): + self._Y.limit = limit + def _subarray_computations(self, Y): inan = np.isnan(Y) has_none = inan.any() diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py index 6679eba4..1381b611 100644 --- a/GPy/kern/_src/coregionalize.py +++ b/GPy/kern/_src/coregionalize.py @@ -51,8 +51,6 @@ class Coregionalize(Kern): assert kappa.shape==(self.output_dim, ) self.kappa = Param('kappa', kappa, Logexp()) self.add_parameters(self.W, self.kappa) - self.parameters_changed() - def parameters_changed(self): self.B = np.dot(self.W, self.W.T) + np.diag(self.kappa) diff --git a/GPy/kern/_src/periodic.py b/GPy/kern/_src/periodic.py index e4e659a2..36ff3527 100644 --- a/GPy/kern/_src/periodic.py +++ b/GPy/kern/_src/periodic.py @@ -34,7 +34,6 @@ class Periodic(Kern): self.lengthscale = Param('lengthscale', np.float64(lengthscale), Logexp()) self.period = Param('period', np.float64(period), Logexp()) self.add_parameters(self.variance, self.lengthscale, self.period) - self.parameters_changed() def _cos(self, alpha, omega, phase): def f(x): diff --git a/GPy/kern/_src/sympykern.py b/GPy/kern/_src/sympykern.py index 0688682a..920f47f3 100644 --- a/GPy/kern/_src/sympykern.py +++ b/GPy/kern/_src/sympykern.py @@ -124,9 +124,6 @@ class Sympykern(Kern): # generate the code for the covariance functions self._gen_code() - self.parameters_changed() # initializes caches - - def __add__(self,other): return spkern(self._sp_k+other._sp_k) diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py index 8763426a..3617e260 100644 --- a/GPy/models/bayesian_gplvm.py +++ b/GPy/models/bayesian_gplvm.py @@ -49,7 +49,6 @@ class BayesianGPLVM(SparseGP): SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, name, **kwargs) self.add_parameter(self.X, index=0) - self.parameters_changed() def _getstate(self): """ diff --git a/GPy/models/gp_regression.py b/GPy/models/gp_regression.py index f8957906..5e83db09 100644 --- a/GPy/models/gp_regression.py +++ b/GPy/models/gp_regression.py @@ -28,7 +28,6 @@ class GPRegression(GP): likelihood = likelihoods.Gaussian() super(GPRegression, self).__init__(X, Y, kernel, likelihood, name='GP regression') - self.parameters_changed() def _getstate(self): return GP._getstate(self) diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index 0423aecd..dd1c44ba 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -1,14 +1,17 @@ # ## Copyright (c) 2013, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from GPy.core import Model -from GPy.core import SparseGP -from GPy.util.linalg import PCA -import numpy +import numpy as np import itertools import pylab -from GPy.kern import Kern -from GPy.models.bayesian_gplvm import BayesianGPLVM + +from ..core import Model, SparseGP +from ..util.linalg import PCA +from ..kern import Kern +from bayesian_gplvm import BayesianGPLVM +from ..core.parameterization.variational import NormalPosterior, NormalPrior +from ..inference.latent_function_inference.var_dtc import VarDTCMissingData +from ..likelihoods.gaussian import Gaussian class MRD2(Model): """ @@ -20,11 +23,101 @@ class MRD2(Model): to match up, whereas the dimensionality p_d can differ. 
:param [array-like] Ylist: List of datasets to apply MRD on - :param array-like q_mean: mean of starting latent space q in [n x q] - :param array-like q_variance: variance of starting latent space q in [n x q] - :param :class:`~GPy.inference.latent_function_inference + :param input_dim: latent dimensionality + :type input_dim: int + :param array-like X: mean of starting latent space q in [n x q] + :param array-like X_variance: variance of starting latent space q in [n x q] + :param initx: initialisation method for the latent space : + + * 'concat' - PCA on concatenation of all datasets + * 'single' - Concatenation of PCA on datasets, respectively + * 'random' - Random draw from a Normal(0,1) + + :type initx: ['concat'|'single'|'random'] + :param initz: initialisation method for inducing inputs + :type initz: 'permute'|'random' + :param num_inducing: number of inducing inputs to use + :param Z: initial inducing inputs + :param kernel: list of kernels or kernel to copy for each output + :type kernel: [GPy.kern.kern] | GPy.kern.kern | None (default) + :param :class:`~GPy.inference.latent_function_inference inference_method: the inference method to use + :param :class:`~GPy.likelihoods.likelihood.Likelihood` likelihood: the likelihood to use + :param str name: the name of this model """ + def __init__(self, Ylist, input_dim, X=None, X_variance=None, + initx = 'PCA', initz = 'permute', + num_inducing=10, Z=None, kernel=None, + inference_method=None, likelihood=None, name='mrd'): + super(MRD2, self).__init__(name) + + # sort out the kernels + if kernel is None: + from ..kern import RBF + self.kern = [RBF(input_dim, ARD=1, name='Y_{}'.format(i)) for i in range(len(Ylist))] + elif isinstance(kernel, Kern): + self.kern = [kernel.copy(name='Y_{}'.format(i)) for i in range(len(Ylist))] + else: + assert len(kernel) == len(Ylist), "need one kernel per output" + assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!" 
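# A hypothetical usage sketch for the constructor assembled in this hunk
# (argument names taken from the diff; MRD2 is visibly work in progress, so
# treat this as intent rather than a working call). The datasets must share
# the sample count n but may differ in dimensionality p_d; kernel may be
# omitted (one ARD RBF per output is created), a single kernel to copy, or
# a list with one entry per dataset:
#
#     import numpy as np
#     Y1, Y2 = np.random.randn(100, 8), np.random.randn(100, 12)
#     m = MRD2([Y1, Y2], input_dim=3, initx='PCA', initz='permute', num_inducing=10)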
+ + self.input_dim = input_dim + self.num_inducing = num_inducing + + self._in_init_ = True + X = self._init_X(initx, Ylist) + self.Z = self._init_Z(initz, X) + self.num_inducing = self.Z.shape[0] # ensure M==N if M>N + + if X_variance is None: + X_variance = np.random.uniform(0,.2,X.shape) + + self.variational_prior = NormalPrior() + self.X = NormalPosterior(X, X_variance) + + if likelihood is None: + likelihood = Gaussian() + + if inference_method is None: + if any(np.any(np.isnan(y)) for y in Ylist): + self.inference_method = VarDTCMissingData(limit=len(Ylist)) + + self.Ylist = Ylist + + def parameters_changed(self): + for y in self.Ylist: + pass + + def _init_X(self, init='PCA', likelihood_list=None): + if likelihood_list is None: + likelihood_list = self.likelihood_list + Ylist = [] + for likelihood_or_Y in likelihood_list: + if type(likelihood_or_Y) is np.ndarray: + Ylist.append(likelihood_or_Y) + else: + Ylist.append(likelihood_or_Y.Y) + del likelihood_list + if init in "PCA_concat": + X = PCA(np.hstack(Ylist), self.input_dim)[0] + elif init in "PCA_single": + X = np.zeros((Ylist[0].shape[0], self.input_dim)) + for qs, Y in itertools.izip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist): + X[:, qs] = PCA(Y, len(qs))[0] + else: # init == 'random': + X = np.random.randn(Ylist[0].shape[0], self.input_dim) + self.X = X + return X + + def _init_Z(self, init="permute", X=None): + if X is None: + X = self.X + if init in "permute": + Z = np.random.permutation(X.copy())[:self.num_inducing] + elif init in "random": + Z = np.random.randn(self.num_inducing, self.input_dim) * X.var() + self.Z = Z + return Z class MRD(Model): """ @@ -84,7 +177,7 @@ class MRD(Model): del self._in_init_ self.gref = self.bgplvms[0] - nparams = numpy.array([0] + [SparseGP._get_params(g).size - g.Z.size for g in self.bgplvms]) + nparams = np.array([0] + [SparseGP._get_params(g).size - g.Z.size for g in self.bgplvms]) self.nparams = nparams.cumsum() self.num_data = self.gref.num_data @@ -216,7 +309,7 @@ class MRD(Model): X_var = self.gref.X_variance.ravel() Z = self.gref.Z.ravel() thetas = [SparseGP._get_params(g)[g.Z.size:] for g in self.bgplvms] - params = numpy.hstack([X, X_var, Z, numpy.hstack(thetas)]) + params = np.hstack([X, X_var, Z, np.hstack(thetas)]) return params # def _set_var_params(self, g, X, X_var, Z): @@ -239,13 +332,13 @@ class MRD(Model): # set params for all: for g, s, e in itertools.izip(self.bgplvms, self.nparams, self.nparams[1:]): - g._set_params(numpy.hstack([X, X_var, Z, thetas[s:e]])) + g._set_params(np.hstack([X, X_var, Z, thetas[s:e]])) # self._set_var_params(g, X, X_var, Z) # self._set_kern_params(g, thetas[s:e].copy()) # g._compute_kernel_matrices() # if self.auto_scale_factor: -# g.scale_factor = numpy.sqrt(g.psi2.sum(0).mean() * g.likelihood.precision) -# # self.scale_factor = numpy.sqrt(self.psi2.sum(0).mean() * self.likelihood.precision) +# g.scale_factor = np.sqrt(g.psi2.sum(0).mean() * g.likelihood.precision) +# # self.scale_factor = np.sqrt(self.psi2.sum(0).mean() * self.likelihood.precision) # g._computations() @@ -264,48 +357,18 @@ class MRD(Model): dKLmu, dKLdS = self.gref.dKL_dmuS() dLdmu -= dKLmu dLdS -= dKLdS - dLdmuS = numpy.hstack((dLdmu.flatten(), dLdS.flatten())).flatten() + dLdmuS = np.hstack((dLdmu.flatten(), dLdS.flatten())).flatten() dldzt1 = reduce(lambda a, b: a + b, (SparseGP._log_likelihood_gradients(g)[:self.MQ] for g in self.bgplvms)) - return numpy.hstack((dLdmuS, + return np.hstack((dLdmuS, dldzt1, - numpy.hstack([numpy.hstack([g.dL_dtheta(), + 

 class MRD(Model):
     """
@@ -84,7 +177,7 @@ class MRD(Model):
         del self._in_init_
         self.gref = self.bgplvms[0]

-        nparams = numpy.array([0] + [SparseGP._get_params(g).size - g.Z.size for g in self.bgplvms])
+        nparams = np.array([0] + [SparseGP._get_params(g).size - g.Z.size for g in self.bgplvms])
         self.nparams = nparams.cumsum()

         self.num_data = self.gref.num_data
@@ -216,7 +309,7 @@ class MRD(Model):
         X_var = self.gref.X_variance.ravel()
         Z = self.gref.Z.ravel()
         thetas = [SparseGP._get_params(g)[g.Z.size:] for g in self.bgplvms]
-        params = numpy.hstack([X, X_var, Z, numpy.hstack(thetas)])
+        params = np.hstack([X, X_var, Z, np.hstack(thetas)])
         return params

 #     def _set_var_params(self, g, X, X_var, Z):
@@ -239,13 +332,13 @@ class MRD(Model):

         # set params for all:
         for g, s, e in itertools.izip(self.bgplvms, self.nparams, self.nparams[1:]):
-            g._set_params(numpy.hstack([X, X_var, Z, thetas[s:e]]))
+            g._set_params(np.hstack([X, X_var, Z, thetas[s:e]]))
 #            self._set_var_params(g, X, X_var, Z)
 #            self._set_kern_params(g, thetas[s:e].copy())
 #            g._compute_kernel_matrices()
 #             if self.auto_scale_factor:
-#                 g.scale_factor = numpy.sqrt(g.psi2.sum(0).mean() * g.likelihood.precision)
-# #                self.scale_factor = numpy.sqrt(self.psi2.sum(0).mean() * self.likelihood.precision)
+#                 g.scale_factor = np.sqrt(g.psi2.sum(0).mean() * g.likelihood.precision)
+# #                self.scale_factor = np.sqrt(self.psi2.sum(0).mean() * self.likelihood.precision)
 #            g._computations()

@@ -264,48 +357,18 @@ class MRD(Model):
         dKLmu, dKLdS = self.gref.dKL_dmuS()
         dLdmu -= dKLmu
         dLdS -= dKLdS
-        dLdmuS = numpy.hstack((dLdmu.flatten(), dLdS.flatten())).flatten()
+        dLdmuS = np.hstack((dLdmu.flatten(), dLdS.flatten())).flatten()
         dldzt1 = reduce(lambda a, b: a + b, (SparseGP._log_likelihood_gradients(g)[:self.MQ] for g in self.bgplvms))

-        return numpy.hstack((dLdmuS,
+        return np.hstack((dLdmuS,
                           dldzt1,
-                          numpy.hstack([numpy.hstack([g.dL_dtheta(),
+                          np.hstack([np.hstack([g.dL_dtheta(),
                                                       g.likelihood._gradients(\
                                                           partial=g.partial_for_likelihood)]) \
                                         for g in self.bgplvms])))

-    def _init_X(self, init='PCA', likelihood_list=None):
-        if likelihood_list is None:
-            likelihood_list = self.likelihood_list
-        Ylist = []
-        for likelihood_or_Y in likelihood_list:
-            if type(likelihood_or_Y) is numpy.ndarray:
-                Ylist.append(likelihood_or_Y)
-            else:
-                Ylist.append(likelihood_or_Y.Y)
-        del likelihood_list
-        if init in "PCA_concat":
-            X = PCA(numpy.hstack(Ylist), self.input_dim)[0]
-        elif init in "PCA_single":
-            X = numpy.zeros((Ylist[0].shape[0], self.input_dim))
-            for qs, Y in itertools.izip(numpy.array_split(numpy.arange(self.input_dim), len(Ylist)), Ylist):
-                X[:, qs] = PCA(Y, len(qs))[0]
-        else: # init == 'random':
-            X = numpy.random.randn(Ylist[0].shape[0], self.input_dim)
-        self.X = X
-        return X
-
-    def _init_Z(self, init="permute", X=None):
-        if X is None:
-            X = self.X
-        if init in "permute":
-            Z = numpy.random.permutation(X.copy())[:self.num_inducing]
-        elif init in "random":
-            Z = numpy.random.randn(self.num_inducing, self.input_dim) * X.var()
-        self.Z = Z
-        return Z
-
     def _handle_plotting(self, fignum, axes, plotf, sharex=False, sharey=False):
         if axes is None:
             fig = pylab.figure(num=fignum)
@@ -358,7 +421,7 @@ class MRD(Model):
         """
         if titles is None:
             titles = [r'${}$'.format(name) for name in self.names]
-        ymax = reduce(max, [numpy.ceil(max(g.input_sensitivity())) for g in self.bgplvms])
+        ymax = reduce(max, [np.ceil(max(g.input_sensitivity())) for g in self.bgplvms])
         def plotf(i, g, ax):
             ax.set_ylim([0,ymax])
             g.kern.plot_ARD(ax=ax, title=titles[i], *args, **kwargs)
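The _get_params, _set_params and gradient hunks above all rely on one flat layout: the shared block [X, X_var, Z] comes first, then one kernel-and-likelihood block per sub-model, with self.nparams (a cumulative sum of block sizes) giving the slice boundaries. A toy illustration with made-up block sizes:

    import numpy as np

    shared = np.zeros(10)                        # stands in for [X, X_var, Z]
    thetas = [np.full(3, 1.0), np.full(5, 2.0)]  # per-model parameter blocks

    params = np.hstack([shared] + thetas)        # flat vector, as in _get_params

    nparams = np.array([0] + [t.size for t in thetas]).cumsum()  # [0, 3, 8]
    flat = params[shared.size:]
    per_model = [flat[s:e] for s, e in zip(nparams, nparams[1:])]
    # per_model[0] -> the three 1.0s, per_model[1] -> the five 2.0s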
diff --git a/GPy/plotting/matplot_dep/kernel_plots.py b/GPy/plotting/matplot_dep/kernel_plots.py
index b3227e43..2b990611 100644
--- a/GPy/plotting/matplot_dep/kernel_plots.py
+++ b/GPy/plotting/matplot_dep/kernel_plots.py
@@ -23,7 +23,7 @@ def add_bar_labels(fig, ax, bars, bottom=0):
         xi = patch.get_x() + patch.get_width() / 2.
         va = 'top'
         c = 'w'
-        t = TextPath((0, 0), "${xi}$".format(xi=xi), rotation=0, usetex=True, ha='center')
+        t = TextPath((0, 0), "${xi}$".format(xi=xi), rotation=0, ha='center')
         transform = transOffset
         if patch.get_extents().height <= t.get_extents().height + 3:
             va = 'bottom'
diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py
index d4105e3c..d2a236dd 100644
--- a/GPy/testing/likelihood_tests.py
+++ b/GPy/testing/likelihood_tests.py
@@ -10,7 +10,7 @@ from functools import partial
 #np.random.seed(300)
 #np.random.seed(7)
-np.seterr(divide='raise')
+#np.seterr(divide='raise')

 def dparam_partial(inst_func, *args):
     """
     If we have a instance method that needs to be called but that doesn't
@@ -350,7 +350,7 @@ class TestNoiseModels(object):
             def t_logpdf(self, model, Y, f):
                 print "\n{}".format(inspect.stack()[0][3])
                 print model
-                print model._get_params()
+                #print model._get_params()
                 np.testing.assert_almost_equal(
                         model.pdf(f.copy(), Y.copy()),
                         np.exp(model.logpdf(f.copy(), Y.copy()))
@@ -664,7 +664,8 @@ class LaplaceTests(unittest.TestCase):
         print m1
         print m2

-        m2._set_params(m1._get_params())
+        m2.parameters_changed()
+        #m2._set_params(m1._get_params())

         #Predict for training points to get posterior mean and variance
         post_mean, post_var, _, _ = m1.predict(X)
@@ -700,7 +701,8 @@ class LaplaceTests(unittest.TestCase):
         np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
         #Check marginals are the same with random
         m1.randomize()
-        m2._set_params(m1._get_params())
+        #m2._set_params(m1._get_params())
+        m2.parameters_changed()
         np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)

         #Check they are checkgradding
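Disabling the module-wide np.seterr(divide='raise') keeps the setting from leaking into every other test in the process. Where a single test still wants division errors to raise, numpy's errstate context manager scopes the behaviour to one block (standard numpy API); a minimal sketch:

    import numpy as np

    with np.errstate(divide='raise'):
        try:
            np.log(np.zeros(1))  # divide-by-zero raises only inside this block
        except FloatingPointError:
            pass  # handled locally; the global error state is untouched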
diff --git a/GPy/testing/observable_tests.py b/GPy/testing/observable_tests.py
index 4123f58a..ebda1630 100644
--- a/GPy/testing/observable_tests.py
+++ b/GPy/testing/observable_tests.py
@@ -8,14 +8,17 @@ from GPy.core.parameterization.parameterized import Parameterized
 from GPy.core.parameterization.param import Param
 import numpy

+# One trigger in init
+_trigger_start = -1
 class ParamTestParent(Parameterized):
-    parent_changed_count = 0
+    parent_changed_count = _trigger_start
     def parameters_changed(self):
         self.parent_changed_count += 1

 class ParameterizedTest(Parameterized):
-    params_changed_count = 0
+    # One trigger after initialization
+    params_changed_count = _trigger_start
     def parameters_changed(self):
         self.params_changed_count += 1
     def _set_params(self, params, trigger_parent=True):
@@ -92,29 +95,31 @@ class Test(unittest.TestCase):

     def test_set_params(self):
         self.assertEqual(self.par.params_changed_count, 0, 'no params changed yet')
-        self.par._set_params(numpy.ones(self.par.size))
+        self.par._param_array_[:] = 1
+        self.par._trigger_params_changed()
         self.assertEqual(self.par.params_changed_count, 1, 'now params changed')
         self.assertEqual(self.parent.parent_changed_count, self.par.params_changed_count)

-        self.parent._set_params(numpy.ones(self.parent.size) * 2)
+        self.par._param_array_[:] = 2
+        self.par._trigger_params_changed()
         self.assertEqual(self.par.params_changed_count, 2, 'now params changed')
         self.assertEqual(self.parent.parent_changed_count, self.par.params_changed_count)

     def test_priority_notify(self):
         self.assertEqual(self.par.params_changed_count, 0)
-        self.par._notify_observers(0, None)
+        self.par.notify_observers(0, None)
         self.assertEqual(self.par.params_changed_count, 1)
         self.assertEqual(self.parent.parent_changed_count, self.par.params_changed_count)
-        self.par._notify_observers(0, -numpy.inf)
+        self.par.notify_observers(0, -numpy.inf)
         self.assertEqual(self.par.params_changed_count, 2)
         self.assertEqual(self.parent.parent_changed_count, 1)

     def test_priority(self):
         self.par.add_observer(self, self._trigger, -1)
         self.par.add_observer(self, self._trigger_priority, 0)

-        self.par._notify_observers(0)
+        self.par.notify_observers(0)

         self.assertEqual(self._first, self._trigger_priority, 'priority should be first')
         self.assertEqual(self._second, self._trigger, 'priority should be first')
@@ -123,7 +128,7 @@ class Test(unittest.TestCase):
         self.par.add_observer(self, self._trigger, 1)
         self.par.add_observer(self, self._trigger_priority, 0)

-        self.par._notify_observers(0)
+        self.par.notify_observers(0)

         self.assertEqual(self._first, self._trigger, 'priority should be second')
         self.assertEqual(self._second, self._trigger_priority, 'priority should be second')
diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py
index 0da3f3ae..b2f57144 100644
--- a/GPy/testing/parameterized_tests.py
+++ b/GPy/testing/parameterized_tests.py
@@ -22,6 +22,10 @@ class Test(unittest.TestCase):
         self.test1.add_parameter(self.rbf, 0)
         self.test1.add_parameter(self.param)

+        x = np.linspace(-2,6,4)[:,None]
+        y = np.sin(x)
+        self.testmodel = GPy.models.GPRegression(x,y)
+
     def test_add_parameter(self):
         self.assertEquals(self.rbf._parent_index_, 0)
         self.assertEquals(self.white._parent_index_, 1)
@@ -38,7 +42,6 @@ class Test(unittest.TestCase):
         self.test1.add_parameter(self.white, 0)
         self.assertListEqual(self.test1._fixes_.tolist(),[FIXED,UNFIXED,UNFIXED])

-
     def test_remove_parameter(self):
         from GPy.core.parameterization.transformations import FIXED, UNFIXED, __fixed__, Logexp
         self.white.fix()
@@ -89,6 +92,18 @@ class Test(unittest.TestCase):
         self.assertEqual(self.rbf.constraints._offset, 0)
         self.assertEqual(self.param.constraints._offset, 3)

+    def test_fixing_randomize(self):
+        self.white.fix(warning=False)
+        val = float(self.test1.white.variance)
+        self.test1.randomize()
+        self.assertEqual(val, self.white.variance)
+
+    def test_fixing_optimize(self):
+        self.testmodel.kern.lengthscale.fix()
+        val = float(self.testmodel.kern.lengthscale)
+        self.testmodel.randomize()
+        self.assertEqual(val, self.testmodel.kern.lengthscale)
+
 if __name__ == "__main__":
     #import sys;sys.argv = ['', 'Test.test_add_parameter']
     unittest.main()
\ No newline at end of file
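The two new tests pin down the contract that randomize() must leave fixed parameters untouched. Mirroring test_fixing_optimize from user code would look roughly like this (same calls as the test, assuming this branch's API):

    import numpy as np
    import GPy

    x = np.linspace(-2, 6, 4)[:, None]
    m = GPy.models.GPRegression(x, np.sin(x))

    m.kern.lengthscale.fix()        # exclude from optimisation and randomize
    before = float(m.kern.lengthscale)
    m.randomize()                   # resamples only the unfixed parameters
    assert float(m.kern.lengthscale) == before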