Don't ever call parameters_changed yourself anymore; parameters now live in-place in one shared memory block

This commit is contained in:
Max Zwiessele 2014-03-04 17:32:46 +00:00
parent 56d749ded8
commit 0df263956f
21 changed files with 601 additions and 284 deletions
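A minimal sketch of the usage pattern this commit enforces (class name, parameter and import paths are illustrative assumptions, not part of the commit; the guarantee that parameters_changed() runs automatically comes from the ParametersChangedMeta metaclass and the shared in-memory _param_array_ introduced in the diffs below):

    import numpy as np
    from GPy.core.parameterization import Parameterized
    from GPy.core.parameterization.param import Param

    class ExampleModel(Parameterized):
        # hypothetical subclass illustrating the new contract
        def __init__(self, name='example'):
            super(ExampleModel, self).__init__(name=name)
            self.variance = Param('variance', np.ones(1))
            self.add_parameter(self.variance)
            # do NOT call self.parameters_changed() here:
            # the metaclass calls it once __init__ has returned

        def parameters_changed(self):
            # recompute cached quantities; runs automatically whenever
            # a parameter is written, since values now live in-place
            self._var_squared = float(self.variance) ** 2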

View file

@@ -49,7 +49,7 @@ class ObservableArray(np.ndarray, Observable):
def __setitem__(self, s, val):
if self._s_not_empty(s):
super(ObservableArray, self).__setitem__(s, val)
self._notify_observers(self[s])
self.notify_observers(self[s])
def __getslice__(self, start, stop):
return self.__getitem__(slice(start, stop))
@@ -65,149 +65,149 @@ class ObservableArray(np.ndarray, Observable):
def __ilshift__(self, *args, **kwargs):
r = np.ndarray.__ilshift__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __irshift__(self, *args, **kwargs):
r = np.ndarray.__irshift__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __ixor__(self, *args, **kwargs):
r = np.ndarray.__ixor__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __ipow__(self, *args, **kwargs):
r = np.ndarray.__ipow__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __ifloordiv__(self, *args, **kwargs):
r = np.ndarray.__ifloordiv__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __isub__(self, *args, **kwargs):
r = np.ndarray.__isub__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __ior__(self, *args, **kwargs):
r = np.ndarray.__ior__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __itruediv__(self, *args, **kwargs):
r = np.ndarray.__itruediv__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __idiv__(self, *args, **kwargs):
r = np.ndarray.__idiv__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __iand__(self, *args, **kwargs):
r = np.ndarray.__iand__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __imod__(self, *args, **kwargs):
r = np.ndarray.__imod__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __iadd__(self, *args, **kwargs):
r = np.ndarray.__iadd__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
def __imul__(self, *args, **kwargs):
r = np.ndarray.__imul__(self, *args, **kwargs)
self._notify_observers()
self.notify_observers()
return r
# def __rrshift__(self, *args, **kwargs):
# r = np.ndarray.__rrshift__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __ror__(self, *args, **kwargs):
# r = np.ndarray.__ror__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rxor__(self, *args, **kwargs):
# r = np.ndarray.__rxor__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rdivmod__(self, *args, **kwargs):
# r = np.ndarray.__rdivmod__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __radd__(self, *args, **kwargs):
# r = np.ndarray.__radd__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rdiv__(self, *args, **kwargs):
# r = np.ndarray.__rdiv__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rtruediv__(self, *args, **kwargs):
# r = np.ndarray.__rtruediv__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rshift__(self, *args, **kwargs):
# r = np.ndarray.__rshift__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rmul__(self, *args, **kwargs):
# r = np.ndarray.__rmul__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rpow__(self, *args, **kwargs):
# r = np.ndarray.__rpow__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rsub__(self, *args, **kwargs):
# r = np.ndarray.__rsub__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r
# def __rfloordiv__(self, *args, **kwargs):
# r = np.ndarray.__rfloordiv__(self, *args, **kwargs)
# self._notify_observers()
# self.notify_observers()
# return r

View file

@@ -62,6 +62,7 @@ class ParameterIndexOperations(object):
def clear(self):
self._properties.clear()
@property
def size(self):
return reduce(lambda a,b: a+b.size, self.iterindices(), 0)
@@ -165,7 +166,7 @@ class ParameterIndexOperationsView(object):
for i, ind in self.items():
self._param_index_ops.remove(i, ind+self._offset)
@property
def size(self):
return reduce(lambda a,b: a+b.size, self.iterindices(), 0)

View file

@@ -54,7 +54,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable):
obj._tied_to_me_ = SetDict()
obj._tied_to_ = []
obj._original_ = True
obj._gradient_ = None
obj._gradient_array_ = numpy.zeros(obj.shape, dtype=numpy.float64)
return obj
def __init__(self, name, input_array, default_constraint=None, *a, **kw):
@@ -77,7 +77,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable):
# see InfoArray.__array_finalize__ for comments
if obj is None: return
super(Param, self).__array_finalize__(obj)
self._direct_parent_ = getattr(obj, '_direct_parent_', None)
self._parent_ = getattr(obj, '_parent_', None)
self._parent_index_ = getattr(obj, '_parent_index_', None)
self._default_constraint_ = getattr(obj, '_default_constraint_', None)
self._current_slice_ = getattr(obj, '_current_slice_', None)
@@ -89,16 +89,18 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable):
self._updated_ = getattr(obj, '_updated_', None)
self._original_ = getattr(obj, '_original_', None)
self._name = getattr(obj, 'name', None)
self._gradient_ = getattr(obj, '_gradient_', None)
self._gradient_array_ = getattr(obj, '_gradient_array_', None)
self.constraints = getattr(obj, 'constraints', None)
self.priors = getattr(obj, 'priors', None)
@property
def _param_array_(self):
return self
@property
def gradient(self):
if self._gradient_ is None:
self._gradient_ = numpy.zeros(self._realshape_)
return self._gradient_[self._current_slice_]
return self._gradient_array_[self._current_slice_]
@gradient.setter
def gradient(self, val):
self.gradient[:] = val
@@ -110,7 +112,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable):
func, args, state = super(Param, self).__reduce__()
return func, args, (state,
(self.name,
self._direct_parent_,
self._parent_,
self._parent_index_,
self._default_constraint_,
self._current_slice_,
@@ -135,7 +137,7 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable):
self._current_slice_ = state.pop()
self._default_constraint_ = state.pop()
self._parent_index_ = state.pop()
self._direct_parent_ = state.pop()
self._parent_ = state.pop()
self.name = state.pop()
def copy(self, *args):
@@ -148,20 +150,20 @@ class Param(OptimizationHandlable, ObservableArray, Gradcheckable):
#===========================================================================
# get/set parameters
#===========================================================================
def _set_params(self, param, trigger_parent=True):
self.flat = param
if trigger_parent: min_priority = None
else: min_priority = -numpy.inf
self._notify_observers(None, min_priority)
def _get_params(self):
return self.flat
def _collect_gradient(self, target):
target += self.gradient.flat
def _set_gradient(self, g):
self.gradient = g.reshape(self._realshape_)
# def _set_params(self, param, trigger_parent=True):
# self.flat = param
# if trigger_parent: min_priority = None
# else: min_priority = -numpy.inf
# self.notify_observers(None, min_priority)
#
# def _get_params(self):
# return self.flat
#
# def _collect_gradient(self, target):
# target += self.gradient.flat
#
# def _set_gradient(self, g):
# self.gradient = g.reshape(self._realshape_)
#===========================================================================
# Array operations -> done
@@ -362,7 +364,7 @@ class ParamConcatenation(object):
parents = dict()
for p in self.params:
if p.has_parent():
parent = p._direct_parent_
parent = p._parent_
level = 0
while parent is not None:
if parent in parents:
@@ -370,7 +372,7 @@
else:
parents[parent] = level
level += 1
parent = parent._direct_parent_
parent = parent._parent_
import operator
self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1)))
#===========================================================================
@@ -397,7 +399,7 @@
#===========================================================================
def update_all_params(self):
for par in self.parents:
par._notify_observers(-numpy.inf)
par.notify_observers(-numpy.inf)
def constrain(self, constraint, warning=True):
[param.constrain(constraint, trigger_parent=False) for param in self.params]

View file

@@ -1,26 +1,51 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Core module for parameterization.
This module implements all parameterization techniques, split up in modular bits.
HierarchyError:
raised when an error with the hierarchy occurs (cycles etc.)
Observable:
Observable pattern for parameterization
"""
from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
import numpy as np
import itertools
__updated__ = '2013-12-16'
class HierarchyError(Exception):
"""
Gets thrown when something is wrong with the parameter hierarchy
Gets thrown when something is wrong with the parameter hierarchy.
"""
def adjust_name_for_printing(name):
"""
Make sure a name can be printed and also used as a variable name.
"""
if name is not None:
return name.replace(" ", "_").replace(".", "_").replace("-", "").replace("+", "").replace("!", "").replace("*", "").replace("/", "")
return name.replace(" ", "_").replace(".", "_").replace("-", "_m_").replace("+", "_p_").replace("!", "_I_").replace("**", "_xx_").replace("*", "_x_").replace("/", "_l_").replace("@",'_at_')
return ''
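For illustration, the new replacement rules map operator characters to readable tokens (hypothetical calls):

    adjust_name_for_printing("rbf.lengthscale**2")  # -> 'rbf_lengthscale_xx_2'
    adjust_name_for_printing("a+b/c")               # -> 'a_p_b_l_c'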
class Observable(object):
"""
Observable pattern for parameterization.
This object allows observers to register themselves together with a (bound!)
function as the callback. Every time the observable changes, it sends a
notification with itself as the only argument to all its observers.
"""
_updated = True
def __init__(self, *args, **kwargs):
self._observer_callables_ = []
def __del__(self, *args, **kwargs):
del self._observer_callables_
def add_observer(self, observer, callble, priority=0):
self._insert_sorted(priority, observer, callble)
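A minimal sketch of the observer contract (the Temperature/Display classes are hypothetical, not part of the commit; note the callback must be a bound method and receives the changed object):

    class Temperature(Observable):
        def __init__(self):
            super(Temperature, self).__init__()
            self.val = 0.

        def set(self, val):
            self.val = val
            self.notify_observers(which=self)  # renamed from _notify_observers

    class Display(object):
        def show(self, which):
            print "new value:", which.val

    t = Temperature(); d = Display()
    t.add_observer(d, d.show)
    t.set(3.)  # prints: new value: 3.0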
@@ -35,8 +60,8 @@ class Observable(object):
to_remove.append((p, obs, clble))
for r in to_remove:
self._observer_callables_.remove(r)
def _notify_observers(self, which=None, min_priority=None):
def notify_observers(self, which=None, min_priority=None):
"""
Notifies all observers. Which is the element, which kicked off this
notification loop.
@@ -67,6 +92,41 @@ class Observable(object):
self._observer_callables_.insert(ins, (p, o, c))
class Pickleable(object):
"""
Make an object pickleable (see the python docs on 'pickling').
This class provides pickling support via the memento pattern:
_getstate returns a memento of the class, which gets pickled.
_setstate(<memento>) (re-)sets the state of the class to the memento
"""
#===========================================================================
# Pickling operations
#===========================================================================
def pickle(self, f, protocol=-1):
"""
:param f: either filename or open file object to write to.
if it is an open buffer, you have to make sure to close
it properly.
:param protocol: pickling protocol to use; see the python pickle docs for details.
"""
import cPickle
if isinstance(f, str):
with open(f, 'w') as f:
cPickle.dump(self, f, protocol)
else:
cPickle.dump(self, f, protocol)
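Under these assumptions, saving and restoring would look like (a sketch; m stands for any Pickleable object):

    m.pickle('model.pkl')         # dumps via cPickle, protocol -1

    import cPickle
    with open('model.pkl') as fh:
        m2 = cPickle.load(fh)     # __setstate__ dispatches to _setstate if defined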
def __getstate__(self):
if self._has_get_set_state():
return self._getstate()
return self.__dict__
def __setstate__(self, state):
if self._has_get_set_state():
self._setstate(state)
# TODO: maybe parameters_changed() here?
return
self.__dict__ = state
def _has_get_set_state(self):
return '_getstate' in vars(self.__class__) and '_setstate' in vars(self.__class__)
def _getstate(self):
"""
Returns the state of this class in a memento pattern.
@@ -93,70 +153,145 @@ class Pickleable(object):
#===============================================================================
class Parentable(object):
_direct_parent_ = None
"""
Enable an Object to have a parent.
Additionally this adds the parent_index, which is the index for the parent
to look for in its parameter list.
"""
_parent_ = None
_parent_index_ = None
def has_parent(self):
return self._direct_parent_ is not None
def _notify_parent_change(self):
for p in self._parameters_:
p._parent_changed(self)
"""
Return whether this parentable object currently has a parent.
"""
return self._parent_ is not None
def _parent_changed(self):
"""
Gets called when the parent changes, so we can adjust our
inner attributes according to the new parent.
"""
raise NotImplementedError, "shouldnt happen, Parentable objects need to be able to change their parent"
def _disconnect_parent(self, *args, **kw):
"""
Disconnect this object from its parent
"""
raise NotImplementedError, "Abstaract superclass"
@property
def _highest_parent_(self):
if self._direct_parent_ is None:
"""
Gets the highest parent by traversing up to the root node of the hierarchy.
"""
if self._parent_ is None:
return self
return self._direct_parent_._highest_parent_
return self._parent_._highest_parent_
def _notify_parameters_changed(self):
raise NotImplementedError, "shouldnt happen, abstract superclass"
def _notify_parent_change(self):
"""
Don't do anything if in a leaf node
"""
pass
class Nameable(Parentable):
"""
Make an object nameable inside the hierarchy.
"""
def __init__(self, name, *a, **kw):
super(Nameable, self).__init__(*a, **kw)
self._name = name or self.__class__.__name__
@property
def name(self):
"""
The name of this object
"""
return self._name
@name.setter
def name(self, name):
"""
Set the name of this object.
Tell the parent if the name has changed.
"""
from_name = self.name
assert isinstance(name, str)
self._name = name
if self.has_parent():
self._direct_parent_._name_changed(self, from_name)
self._parent_._name_changed(self, from_name)
def hierarchy_name(self, adjust_for_printing=True):
"""
Return the name of this object with its parents' names attached by dots.
:param bool adjust_for_printing: whether to call :func:`adjust_name_for_printing`
on the names, recursively
"""
if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)
else: adjust = lambda x: x
if self.has_parent():
return self._direct_parent_.hierarchy_name() + "." + adjust(self.name)
return self._parent_.hierarchy_name() + "." + adjust(self.name)
return adjust(self.name)
class Gradcheckable(Parentable):
"""
Adds the functionality for an object to be gradcheckable.
For now it is just a thin wrapper around a call to the highest parent.
TODO: Can be done better, by only changing parameters of the current parameter handle,
such that object hierarchy only has to change for those.
"""
def __init__(self, *a, **kw):
super(Gradcheckable, self).__init__(*a, **kw)
def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
"""
Check the gradient of this parameter with respect to the highest parent's
objective function.
This is a three-point estimate of the gradient, perturbing the parameters
with a stepsize step.
The check passes if either the ratio or the difference between numerical and
analytical gradient is smaller than the tolerance.
:param bool verbose: whether each parameter shall be checked individually.
:param float step: the stepsize for the numerical three point gradient estimate.
:param float tolerance: the tolerance for the gradient ratio or difference.
"""
if self.has_parent():
return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance)
return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance)
def _checkgrad(self, param):
"""
Perform the checkgrad on the model.
TODO: this can be done more efficiently, when doing it inside here
"""
raise NotImplementedError, "Need log likelihood to check gradient against"
class Indexable(object):
"""
Enable raveled indices and offsets for this object.
The raveled index of an object is the index for its parameters in a flattened int array.
"""
def _raveled_index(self):
"""
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
"""
raise NotImplementedError, "Need to be able to get the raveled Index"
def _internal_offset(self):
"""
The offset for this parameter inside its parent.
This has to account for shaped parameters!
"""
return 0
def _offset_for(self, param):
"""
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param.
"""
raise NotImplementedError, "shouldnt happen, offset required from non parameterization object?"
def _raveled_index_for(self, param):
@@ -169,6 +304,15 @@ class Indexable(object):
class Constrainable(Nameable, Indexable):
"""
Make an object constrainable with Priors and Transformations.
TODO: Mappings!!
Adding a constraint to a Param means telling the highest parent that
the constraint was added and making sure that all parameters covered
by this object indeed conform to the constraint.
:func:`constrain()` and :func:`unconstrain()` are the main methods here
"""
def __init__(self, name, default_constraint=None, *a, **kw):
super(Constrainable, self).__init__(name=name, *a, **kw)
self._default_constraint_ = default_constraint
@@ -178,12 +322,16 @@ class Constrainable(Nameable, Indexable):
if self._default_constraint_ is not None:
self.constrain(self._default_constraint_)
def _disconnect_parent(self, constr=None):
def _disconnect_parent(self, constr=None, *args, **kw):
"""
From Parentable:
disconnect the parent and set the new constraints to constr
"""
if constr is None:
constr = self.constraints.copy()
self.constraints.clear()
self.constraints = constr
self._direct_parent_ = None
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change()
@@ -193,7 +341,7 @@ class Constrainable(Nameable, Indexable):
#===========================================================================
def constrain_fixed(self, value=None, warning=True, trigger_parent=True):
"""
Constrain this paramter to be fixed to the current value it carries.
Constrain this parameter to be fixed to the current value it carries.
:param warning: print a warning for overwriting constraints.
"""
@@ -237,11 +385,20 @@ class Constrainable(Nameable, Indexable):
#===========================================================================
# Prior Operations
#===========================================================================
def set_prior(self, prior, warning=True, trigger_parent=True):
def set_prior(self, prior, warning=True):
"""
Set the prior for this object to prior.
:param :class:`~GPy.priors.Prior` prior: a prior to set for this parameter
:param bool warning: whether to warn if another prior was set for this parameter
"""
repriorized = self.unset_priors()
self._add_to_index_operations(self.priors, repriorized, prior, warning)
def unset_priors(self, *priors):
"""
Un-set the given priors from this parameter handle; if none are given, un-set all priors.
"""
return self._remove_from_index_operations(self.priors, priors)
def log_prior(self):
@@ -274,7 +431,7 @@ class Constrainable(Nameable, Indexable):
:py:class:`GPy.core.transformations.Transformation`.
"""
if isinstance(transform, Transformation):
self._set_params(transform.initialize(self._get_params()), trigger_parent=trigger_parent)
self._param_array_[:] = transform.initialize(self._param_array_)
reconstrained = self.unconstrain()
self._add_to_index_operations(self.constraints, reconstrained, transform, warning)
@@ -333,6 +490,10 @@ class Constrainable(Nameable, Indexable):
self.unconstrain(Logistic(lower, upper))
def _parent_changed(self, parent):
"""
From Parentable:
Called when the parent changes
"""
from index_operations import ParameterIndexOperationsView
self.constraints = ParameterIndexOperationsView(parent.constraints, parent._offset_for(self), self.size)
self.priors = ParameterIndexOperationsView(parent.priors, parent._offset_for(self), self.size)
@@ -340,14 +501,25 @@ class Constrainable(Nameable, Indexable):
for p in self._parameters_:
p._parent_changed(parent)
def _add_to_index_operations(self, which, reconstrained, transform, warning):
def _add_to_index_operations(self, which, reconstrained, what, warning):
"""
Helper preventing code duplication.
This adds the given what (transformation, prior etc.) to the parameter index operations which.
reconstrained are the reconstrained indices.
Warn when reconstraining parameters if warning is True.
TODO: find out which parameters have changed specifically
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
print "WARNING: reconstraining parameters {}".format(self.parameter_names() or self.name)
which.add(transform, self._raveled_index())
which.add(what, self._raveled_index())
def _remove_from_index_operations(self, which, transforms):
if len(transforms) == 0:
def _remove_from_index_operations(self, which, what):
"""
Helper preventing code duplication.
Remove the given what (transform, prior etc.) from the param index operations which.
"""
if len(what) == 0:
what = which.properties()
removed = np.empty((0,), dtype=int)
for t in what:
@@ -359,36 +531,65 @@ class Constrainable(Nameable, Indexable):
return removed
class OptimizationHandlable(Constrainable, Observable):
"""
This enables optimization handles on an object, as done in GPy 0.4.
'transformed' means the transformations and constraints etc. are applied to the parameters.
"""
def transform(self):
[np.put(self._param_array_, ind, c.finv(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
def untransform(self):
[np.put(self._param_array_, ind, c.f(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
def _get_params_transformed(self):
# transformed parameters (apply transformation rules)
p = self._get_params()
p = self._param_array_.copy()
[np.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
if self._has_fixes():
return p[self._fixes_]
return p
def _set_params_transformed(self, p):
# inverse apply transformations for parameters and set the resulting parameters
self._set_params(self._untransform_params(p))
if self._has_fixes(): self._param_array_[self._fixes_] = p.copy()
else: self._param_array_[:] = p.copy()
self.untransform()
self._trigger_params_changed()
def _trigger_params_changed(self, trigger_parent=True):
[p._trigger_params_changed(trigger_parent=False) for p in self._parameters_]
if trigger_parent: min_priority = None
else: min_priority = -np.inf
self.notify_observers(None, min_priority)
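In effect _get_params_transformed and _set_params_transformed are inverses on the free (unfixed) parameters; a sketch of the roundtrip for a hypothetical object m with a parameter v constrained positive via Logexp:

    x = m._get_params_transformed()   # x = log(exp(v) - 1), fixed entries dropped
    m._set_params_transformed(x)      # writes x in place, untransform() restores v = log(1 + exp(x)),
                                      # then _trigger_params_changed() notifies observers once per handle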
def _size_transformed(self):
return self.size - self.constraints[__fixed__].size
def _untransform_params(self, p):
p = p.copy()
if self._has_fixes(): tmp = self._get_params(); tmp[self._fixes_] = p; p = tmp; del tmp
[np.put(p, ind, c.f(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
return p
def _get_params(self):
#
# def _untransform_params(self, p):
# # inverse apply transformations for parameters
# #p = p.copy()
# if self._has_fixes(): tmp = self._get_params(); tmp[self._fixes_] = p; p = tmp; del tmp
# [np.put(p, ind, c.f(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
# return p
#
# def _get_params(self):
# """
# get all parameters
# """
# return self._param_array_
# p = np.empty(self.size, dtype=np.float64)
# if self.size == 0:
# return p
# [np.put(p, ind, par._get_params()) for ind, par in itertools.izip(self._param)]
# return p
# def _set_params(self, params, trigger_parent=True):
# self._param_array_.flat = params
# if trigger_parent: min_priority = None
# else: min_priority = -np.inf
# self.notify_observers(None, min_priority)
# don't overwrite this anymore!
if not self.size:
return np.empty(shape=(0,), dtype=np.float64)
return np.hstack([x._get_params() for x in self._parameters_ if x.size > 0])
def _set_params(self, params, trigger_parent=True):
# don't overwrite this anymore!
raise NotImplementedError, "This needs to be implemented in Param and Parametrizable"
#raise NotImplementedError, "Abstract superclass: This needs to be implemented in Param and Parameterizable"
#===========================================================================
# Optimization handles:
@@ -396,6 +597,7 @@ class OptimizationHandlable(Constrainable, Observable):
def _get_param_names(self):
n = np.array([p.hierarchy_name() + '[' + str(i) + ']' for p in self.flattened_parameters for i in p._indices()])
return n
def _get_param_names_transformed(self):
n = self._get_param_names()
if self._has_fixes():
@@ -405,19 +607,16 @@
#===========================================================================
# Randomizeable
#===========================================================================
def randomize(self):
def randomize(self, rand_gen=np.random.normal, loc=0, scale=1, *args, **kwargs):
"""
Randomize the model.
Make this draw from the prior if one exists, else draw from N(0,1)
Make this draw from the prior if one exists, else draw from given random generator
"""
# first take care of all parameters (from N(0,1))
# x = self._get_params_transformed()
x = np.random.randn(self._size_transformed())
x = self._untransform_params(x)
x = rand_gen(loc=loc, scale=scale, size=self._size_transformed(), *args, **kwargs)
# now draw from prior where possible
[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
self._set_params(x)
# self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
self._set_params_transformed(x) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
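A usage sketch of the new signature (m is any parameterized model; the generator must accept loc, scale and size keywords, as np.random.normal and np.random.laplace do):

    m.randomize()                             # N(0, 1) draws, then priors overwrite where set
    m.randomize(np.random.laplace, scale=2.)  # swap in another generator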
class Parameterizable(OptimizationHandlable):
def __init__(self, *args, **kwargs):
@@ -427,6 +626,13 @@ class Parameterizable(OptimizationHandlable):
self._added_names_ = set()
def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True):
"""
Get the names of all parameters of this model.
:param bool add_self: whether to add the own name in front of names
:param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names
:param bool recursive: whether to traverse through hierarchy and append leaf node names
"""
if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)
else: adjust = lambda x: x
if recursive: names = [xi for x in self._parameters_ for xi in x.parameter_names(add_self=True, adjust_for_printing=adjust_for_printing)]
@@ -438,8 +644,11 @@
def num_params(self):
return len(self._parameters_)
def _add_parameter_name(self, param):
def _add_parameter_name(self, param, ignore_added_names=False):
pname = adjust_name_for_printing(param.name)
if ignore_added_names:
self.__dict__[pname] = param
return
# and makes sure to not delete programmatically added parameters
if pname in self.__dict__:
if not (param is self.__dict__[pname]):
@@ -461,28 +670,42 @@
def _name_changed(self, param, old_name):
self._remove_parameter_name(None, old_name)
self._add_parameter_name(param)
def _collect_gradient(self, target):
import itertools
[p._collect_gradient(target[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
#=========================================================================
# Gradient handling
#=========================================================================
@property
def gradient(self):
return self._gradient_array_
@gradient.setter
def gradient(self, val):
self._gradient_array_[:] = val
#===========================================================================
# def _collect_gradient(self, target):
# [p._collect_gradient(target[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
#===========================================================================
def _set_params(self, params, trigger_parent=True):
import itertools
[p._set_params(params[s], trigger_parent=False) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
if trigger_parent: min_priority = None
else: min_priority = -np.inf
self._notify_observers(None, min_priority)
#===========================================================================
# def _set_params(self, params, trigger_parent=True):
# [p._set_params(params[s], trigger_parent=False) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
# if trigger_parent: min_priority = None
# else: min_priority = -np.inf
# self.notify_observers(None, min_priority)
#===========================================================================
def _set_gradient(self, g):
import itertools
[p._set_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
#===========================================================================
# def _set_gradient(self, g):
# [p._set_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
#===========================================================================
def add_parameter(self, param, index=None):
def add_parameter(self, param, index=None, _ignore_added_names=False):
"""
:param param: the parameter to add
:type param: list of or one :py:class:`GPy.core.param.Param`
:param index: index of where to put the parameter
:param bool _ignore_added_names: whether the name of the parameter overrides a possibly existing field
Add all parameters to this param class, you can insert parameters
at any given index using the :func:`list.insert` syntax
@@ -494,12 +717,12 @@
self.add_parameter(param, index)
elif param not in self._parameters_:
if param.has_parent():
parent = param._direct_parent_
parent = param._parent_
while parent is not None:
if parent is self:
raise HierarchyError, "You cannot add a parameter twice into the hirarchy"
parent = parent._direct_parent_
param._direct_parent_.remove_parameter(param)
raise HierarchyError, "You cannot add a parameter twice into the hierarchy"
parent = parent._parent_
param._parent_.remove_parameter(param)
# make sure the size is set
if index is None:
self.constraints.update(param.constraints, self.size)
@@ -517,7 +740,7 @@
self.size += param.size
self._connect_parameters()
self._connect_parameters(ignore_added_names=_ignore_added_names)
self._notify_parent_change()
self._connect_fixes()
else:
@@ -551,14 +774,14 @@
self._connect_parameters()
self._notify_parent_change()
parent = self._direct_parent_
parent = self._parent_
while parent is not None:
parent._connect_fixes()
parent._connect_parameters()
parent._notify_parent_change()
parent = parent._direct_parent_
parent = parent._parent_
def _connect_parameters(self):
def _connect_parameters(self, ignore_added_names=False):
# connect parameterlist to this parameterized object
# This just sets up the right connection for the params objects
# to be used as parameters
@@ -567,14 +790,35 @@
if not hasattr(self, "_parameters_") or len(self._parameters_) < 1:
# no parameters for this class
return
sizes = [0]
old_size = 0
self._param_array_ = np.empty(self.size, dtype=np.float64)
self._gradient_array_ = np.empty(self.size, dtype=np.float64)
self._param_slices_ = []
for i, p in enumerate(self._parameters_):
p._direct_parent_ = self
p._parent_ = self
p._parent_index_ = i
sizes.append(p.size + sizes[-1])
self._param_slices_.append(slice(sizes[-2], sizes[-1]))
self._add_parameter_name(p)
pslice = slice(old_size, old_size+p.size)
pi_old_size = old_size
for pi in p.flattened_parameters:
pislice = slice(pi_old_size, pi_old_size+pi.size)
self._param_array_[pislice] = pi._param_array_.flat
self._gradient_array_[pislice] = pi._gradient_array_.flat
pi._param_array_.data = self._param_array_[pislice].data
pi._gradient_array_.data = self._gradient_array_[pislice].data
pi_old_size += pi.size
p._param_array_.data = self._param_array_[pslice].data
p._gradient_array_.data = self._gradient_array_[pslice].data
self._param_slices_.append(pslice)
self._add_parameter_name(p, ignore_added_names=ignore_added_names)
old_size += p.size
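The .data reassignment above is the heart of the "in-place once in memory" change: every Param's buffer becomes a view into the parent's flat _param_array_, so writes propagate without copying and without any explicit _set_params call. A standalone numpy sketch of the same aliasing trick (note that assigning to ndarray.data only works on the older numpy versions this code targets):

    import numpy as np
    parent = np.zeros(5)
    child = np.empty(3)
    child.data = parent[1:4].data   # child now aliases parent's memory
    child[:] = 7.
    print parent                    # [ 0.  7.  7.  7.  0.]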
#===========================================================================
# notification system
@@ -582,7 +826,7 @@
def _parameters_changed_notification(self, which):
self.parameters_changed()
def _pass_through_notify_observers(self, which):
self._notify_observers(which)
self.notify_observers(which)
#===========================================================================
# TODO: not working yet
@@ -595,7 +839,7 @@
dc = dict()
for k, v in self.__dict__.iteritems():
if k not in ['_direct_parent_', '_parameters_', '_parent_index_', '_observer_callables_'] + self.parameter_names():
if k not in ['_parent_', '_parameters_', '_parent_index_', '_observer_callables_'] + self.parameter_names(recursive=False):
if isinstance(v, (Constrainable, ParameterIndexOperations, ParameterIndexOperationsView)):
dc[k] = v.copy()
else:
@@ -603,7 +847,7 @@
if k == '_parameters_':
params = [p.copy() for p in v]
dc['_direct_parent_'] = None
dc['_parent_'] = None
dc['_parent_index_'] = None
dc['_observer_callables_'] = []
dc['_parameters_'] = ArrayList()
@@ -615,11 +859,20 @@
s.__dict__ = dc
for p in params:
import ipdb;ipdb.set_trace()
s.add_parameter(p)
s.add_parameter(p, _ignore_added_names=True)
return s
#===========================================================================
# From being parentable, we have to define the parent_change notification
#===========================================================================
def _notify_parent_change(self):
"""
Notify all parameters that the parent has changed
"""
for p in self._parameters_:
p._parent_changed(self)
def parameters_changed(self):
"""
This method gets called when parameters have changed.

View file

@@ -11,6 +11,12 @@ from parameter_core import Pickleable, Parameterizable, adjust_name_for_printing
from transformations import __fixed__
from lists_and_dicts import ArrayList
class ParametersChangedMeta(type):
def __call__(self, *args, **kw):
instance = super(ParametersChangedMeta, self).__call__(*args, **kw)
instance.parameters_changed()
return instance
class Parameterized(Parameterizable, Pickleable, Gradcheckable):
"""
Parameterized class
@@ -53,6 +59,12 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
If you want to operate on all parameters use m[''] to wildcard select all parameters
and concatenate them. Printing m[''] will result in printing of all parameters in detail.
"""
#===========================================================================
# Metaclass for parameters changed after init.
# This makes sure that parameters_changed() will always be called after __init__
# **Never** call parameters_changed() yourself
__metaclass__ = ParametersChangedMeta
#===========================================================================
def __init__(self, name=None, *a, **kw):
super(Parameterized, self).__init__(name=name, parent=None, parent_index=None, *a, **kw)
self._in_init_ = True
@@ -88,39 +100,7 @@
return G
return node
#===========================================================================
# Pickling operations
#===========================================================================
def pickle(self, f, protocol=-1):
"""
:param f: either filename or open file object to write to.
if it is an open buffer, you have to make sure to close
it properly.
:param protocol: pickling protocol to use, python-pickle for details.
"""
if isinstance(f, str):
with open(f, 'w') as f:
cPickle.dump(self, f, protocol)
else:
cPickle.dump(self, f, protocol)
def copy(self):
c = super(Parameterized, self).copy()
c.add_observer(c, c._parameters_changed_notification, -100)
return c
def __getstate__(self):
if self._has_get_set_state():
return self._getstate()
return self.__dict__
def __setstate__(self, state):
if self._has_get_set_state():
self._setstate(state) # set state
# self._set_params(self._get_params()) # restore all values
return
self.__dict__ = state
def _has_get_set_state(self):
return '_getstate' in vars(self.__class__) and '_setstate' in vars(self.__class__)
def _getstate(self):
"""
Get the current state of the class,
@@ -149,25 +129,33 @@
self._connect_parameters()
self.parameters_changed()
#===========================================================================
# Override copy to handle programmatically added observers
#===========================================================================
def copy(self):
c = super(Pickleable, self).copy()
c.add_observer(c, c._parameters_changed_notification, -100)
return c
#===========================================================================
# Gradient control
#===========================================================================
def _transform_gradients(self, g):
if self.has_parent():
return g
x = self._get_params()
[numpy.put(g, i, g[i] * c.gradfactor(x[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
for p in self.flattened_parameters:
for t, i in p._tied_to_me_.iteritems():
g[self._offset_for(p) + numpy.array(list(i))] += g[self._raveled_index_for(t)]
[numpy.put(g, i, g[i] * c.gradfactor(self._param_array_[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
if self._has_fixes(): return g[self._fixes_]
return g
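The gradfactor multiplication is the chain rule for constrained parameters: the optimizer works on x with v = f(x), so dL/dx = dL/dv * f'(x). For Logexp, v = log(1 + exp(x)) gives f'(x) = 1 - exp(-v) when expressed in v, which is exactly what Logexp.gradfactor in the diff below returns:

    # per constrained index i: g[i] <- g[i] * c.gradfactor(param_value[i])
    g_x = g_v * (1. - np.exp(-v))   # Logexp case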
#===========================================================================
# Indexable
#===========================================================================
def _offset_for(self, param):
# get the offset in the parameterized index array for param
if param.has_parent():
if param._direct_parent_._get_original(param) in self._parameters_:
return self._param_slices_[param._direct_parent_._get_original(param)._parent_index_].start
return self._offset_for(param._direct_parent_) + param._direct_parent_._offset_for(param)
if param._parent_._get_original(param) in self._parameters_:
return self._param_slices_[param._parent_._get_original(param)._parent_index_].start
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0
def _raveled_index_for(self, param):
@@ -229,8 +217,8 @@
return ParamConcatenation(paramlist)
def __setitem__(self, name, value, paramlist=None):
if isinstance(name, slice):
self[''][name] = value
if isinstance(name, (slice, tuple, np.ndarray)):
self._param_array_[name] = value
else:
try: param = self.__getitem__(name, paramlist)
except AttributeError as a: raise a

View file

@@ -10,7 +10,7 @@ import sys
#_lim_val = -np.log(sys.float_info.epsilon)
_exp_lim_val = np.finfo(np.float64).max
_lim_val = np.log(_exp_lim_val)#
_lim_val = np.log(_exp_lim_val)
#===============================================================================
# Fixing constants
@@ -57,7 +57,7 @@ class Logexp(Transformation):
return np.where(x>_lim_val, x, np.log(1. + np.exp(np.clip(x, -_lim_val, _lim_val))))
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f):
return np.where(f>_lim_val, f, np.log(np.exp(f) - 1.))
return np.where(f>_lim_val, f, np.log(np.exp(f+1e-20) - 1.))
def gradfactor(self, f):
return np.where(f>_lim_val, 1., 1 - np.exp(-f))
def initialize(self, f):