mirror of https://github.com/SheffieldML/GPy.git
[parameterized] restructured a lot and finalized some stuff
This commit is contained in:
parent 78ae67bd47
commit 02b5ee1e46
10 changed files with 354 additions and 325 deletions
@@ -38,7 +38,12 @@ class ArrayList(list):
         raise ValueError, "{} is not in list".format(item)
     pass

-class ObservablesList(object):
+class ObserverList(object):
+    """
+    A list which contains the observers.
+    It only holds weak references to observers, such that unbound
+    observers don't dangle in memory.
+    """
     def __init__(self):
         self._poc = []
@@ -46,27 +51,30 @@ class ObservablesList(object):
         p,o,c = self._poc[ind]
         return p, o(), c

-    def remove(self, priority, observable, callble):
+    def remove(self, priority, observer, callble):
         """
-
+        Remove one observer, which had priority and callble.
         """
         self.flush()
         for i in range(len(self) - 1, -1, -1):
             p,o,c = self[i]
-            if priority==p and observable==o and callble==c:
+            if priority==p and observer==o and callble==c:
                 del self._poc[i]

     def __repr__(self):
         return self._poc.__repr__()

-    def add(self, priority, observable, callble):
-        if observable is not None:
+    def add(self, priority, observer, callble):
+        """
+        Add an observer with priority and callble
+        """
+        if observer is not None:
             ins = 0
             for pr, _, _ in self:
                 if priority > pr:
                     break
                 ins += 1
-            self._poc.insert(ins, (priority, weakref.ref(observable), callble))
+            self._poc.insert(ins, (priority, weakref.ref(observer), callble))

     def __str__(self):
         ret = []
@@ -83,6 +91,9 @@ class ObservablesList(object):
         return '\n'.join(ret)

     def flush(self):
+        """
+        Make sure all weak references which point to nothing are flushed (deleted).
+        """
         self._poc = [(p,o,c) for p,o,c in self._poc if o() is not None]

     def __iter__(self):
@@ -95,7 +106,7 @@ class ObservablesList(object):
         return self._poc.__len__()

     def __deepcopy__(self, memo):
-        s = ObservablesList()
+        s = ObserverList()
         for p,o,c in self:
             import copy
             s.add(p, copy.deepcopy(o, memo), copy.deepcopy(c, memo))
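Note: the point of the weak references above is that once the last strong reference to an observer goes away, its entry in the list resolves to None and flush() drops it. A minimal standalone sketch of that mechanism (plain Python, nothing GPy-specific assumed):

    import weakref, gc

    class Observer(object):
        pass

    def callback(observable, which=None):    # stands in for `callble`
        print('notified')

    obs = Observer()
    poc = [(0, weakref.ref(obs), callback)]  # (priority, weak observer, callble)
    assert poc[0][1]() is obs                # the weakref still resolves
    del obs                                  # drop the last strong reference
    gc.collect()
    assert poc[0][1]() is None               # observer collected, reference dangles
    poc = [(p, o, c) for p, o, c in poc if o() is not None]  # what flush() does
    assert len(poc) == 0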
@@ -4,7 +4,7 @@
 import itertools
 import numpy
 np = numpy
-from parameter_core import OptimizationHandlable, adjust_name_for_printing
+from parameter_core import Parameterizable, adjust_name_for_printing
 from observable_array import ObsAr

 ###### printing
@@ -16,7 +16,7 @@ __precision__ = numpy.get_printoptions()['precision'] # numpy printing precision
 __print_threshold__ = 5
 ######

-class Param(OptimizationHandlable, ObsAr):
+class Param(Parameterizable, ObsAr):
     """
     Parameter object for GPy models.
@@ -42,7 +42,7 @@ class Param(OptimizationHandlable, ObsAr):
     """
     __array_priority__ = -1 # Never give back Param
     _fixes_ = None
-    _parameters_ = []
+    parameters = []
     def __new__(cls, name, input_array, default_constraint=None):
         obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array))
         obj._current_slice_ = (slice(obj.shape[0]),)
@@ -87,6 +87,9 @@ class Param(OptimizationHandlable, ObsAr):

     @property
     def param_array(self):
+        """
+        As we are a leaf, this just returns self
+        """
         return self

     @property
@@ -139,6 +142,9 @@ class Param(OptimizationHandlable, ObsAr):
     def _raveled_index_for(self, obj):
         return self._raveled_index()

+    #===========================================================================
+    # Index recreation
+    #===========================================================================
     def _expand_index(self, slice_index=None):
         # this calculates the full indexing arrays from the slicing objects given by get_item for _real..._ attributes
         # it basically translates slices to their respective index arrays and turns negative indices around
@@ -177,15 +183,17 @@ class Param(OptimizationHandlable, ObsAr):

         This function will just call visit on self, as Param objects are leaf nodes.
         """
+        self.__visited = True
         visit(self, *args, **kwargs)
+        self.__visited = False

     def traverse_parents(self, visit, *args, **kwargs):
         """
         Traverse the hierarchy upwards, visiting all parents and their children, except self.
         See "visitor pattern" in literature. This is implemented in pre-order fashion.

         Example:

         parents = []
         self.traverse_parents(parents.append)
         print parents
@@ -17,7 +17,7 @@ from transformations import Logexp, NegativeLogexp, Logistic, __fixed__, FIXED,
 import numpy as np
 import re

-__updated__ = '2014-05-12'
+__updated__ = '2014-05-15'

 class HierarchyError(Exception):
     """
@@ -52,13 +52,23 @@ class Observable(object):
     _updated = True
     def __init__(self, *args, **kwargs):
         super(Observable, self).__init__()
-        from lists_and_dicts import ObservablesList
-        self.observers = ObservablesList()
+        from lists_and_dicts import ObserverList
+        self.observers = ObserverList()

     def add_observer(self, observer, callble, priority=0):
+        """
+        Add an observer `observer` with the callback `callble`
+        and priority `priority` to this observers list.
+        """
         self.observers.add(priority, observer, callble)

     def remove_observer(self, observer, callble=None):
+        """
+        Either (if callble is None) remove all callables
+        which were added alongside observer,
+        or remove callable `callble` which was added alongside
+        the observer `observer`.
+        """
         to_remove = []
         for poc in self.observers:
             _, obs, clble = poc
@@ -91,10 +101,6 @@ class Observable(object):
                 break
             callble(self, which=which)

-#===============================================================================
-# Foundation framework for parameterized and param objects:
-#===============================================================================
-
 class Parentable(object):
     """
     Enable an Object to have a parent.
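Note: notify_observers (partially visible above) walks the observer list in descending priority and calls each callble. A self-contained sketch of that contract; MiniObservable is hypothetical, not GPy code:

    class MiniObservable(object):
        def __init__(self):
            self.observers = []  # (priority, observer, callble), sorted high to low

        def add_observer(self, observer, callble, priority=0):
            ins = 0
            for pr, _, _ in self.observers:  # same insertion walk as ObserverList.add
                if priority > pr:
                    break
                ins += 1
            self.observers.insert(ins, (priority, observer, callble))

        def notify_observers(self, which=None):
            for _, _, callble in self.observers:
                callble(self, which=which)

    order = []
    m = MiniObservable()
    m.add_observer(None, lambda me, which=None: order.append('low'), priority=-100)
    m.add_observer(None, lambda me, which=None: order.append('high'), priority=100)
    m.notify_observers()
    print(order)   # ['high', 'low']: higher priority observers are called first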
@@ -172,7 +178,11 @@ class Pickleable(object):
     # copy and pickling
     #===========================================================================
     def copy(self):
-        """Returns a (deep) copy of the current model"""
+        """
+        Returns a (deep) copy of the current parameter handle.
+
+        All connections to parents of the copy will be cut.
+        """
         #raise NotImplementedError, "Copy is not yet implemented, TODO: Observable hierarchy"
         import copy
         memo = {}
@@ -196,12 +206,11 @@ class Pickleable(object):
         return s

     def __getstate__(self):
-        ignore_list = ([#'_parent_', '_parent_index_',
-                        #'observers',
-                        '_param_array_', '_gradient_array_', '_fixes_',
-                        '_Cacher_wrap__cachers']
-                        #+ self.parameter_names(recursive=False)
-                       )
+        ignore_list = ['_param_array_', # parameters get set from bottom to top
+                       '_gradient_array_', # as well as gradients
+                       '_fixes_', # and fixes
+                       '_Cacher_wrap__cachers', # never pickle cachers
+                       ]
         dc = dict()
         for k,v in self.__dict__.iteritems():
             if k not in ignore_list:
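Note: the ignore_list keeps transient, re-derivable state (parameter/gradient arrays, fixes, cachers) out of the pickle; __setstate__ (moved to Parameterized further down) rebuilds it on load. The same pattern in a generic sketch with made-up names:

    import pickle

    class Handle(object):
        _IGNORE = ('_cache',)                 # transient state, like the ignore_list above

        def __init__(self):
            self.value = 42
            self._cache = {'expensive': 'stuff'}

        def __getstate__(self):
            # drop ignored keys from the pickled dict
            return {k: v for k, v in self.__dict__.items() if k not in self._IGNORE}

        def __setstate__(self, state):
            self.__dict__.update(state)
            self._cache = {}                  # rebuild transient state on unpickling

    h = pickle.loads(pickle.dumps(Handle()))
    print(h.value)    # 42
    print(h._cache)   # {}: rebuilt empty, never pickled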
@@ -246,7 +255,6 @@ class Gradcheckable(Pickleable, Parentable):
         """
         raise HierarchyError, "This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!"

-
 class Nameable(Gradcheckable):
     """
     Make an object nameable inside the hierarchy.
@@ -285,41 +293,8 @@ class Nameable(Gradcheckable):
             return self._parent_.hierarchy_name() + "." + adjust(self.name)
         return adjust(self.name)

-class Indexable(object):
-    """
-    Enable enraveled indexes and offsets for this object.
-    The raveled index of an object is the index for its parameters in a flattened int array.
-    """
-    def __init__(self, *a, **kw):
-        super(Indexable, self).__init__()
-
-    def _raveled_index(self):
-        """
-        Flattened array of ints, specifying the index of this object.
-        This has to account for shaped parameters!
-        """
-        raise NotImplementedError, "Need to be able to get the raveled Index"
-
-    def _offset_for(self, param):
-        """
-        Return the offset of the param inside this parameterized object.
-        This does not need to account for shaped parameters, as it
-        basically just sums up the parameter sizes which come before param.
-        """
-        return 0
-        #raise NotImplementedError, "shouldnt happen, offset required from non parameterization object?"
-
-    def _raveled_index_for(self, param):
-        """
-        get the raveled index for a param
-        that is an int array, containing the indexes for the flattened
-        param inside this parameterized logic.
-        """
-        return param._raveled_index()
-        #raise NotImplementedError, "shouldnt happen, raveld index transformation required from non parameterization object?"
-
-
-class Constrainable(Nameable, Indexable, Observable):
+class Indexable(Nameable, Observable):
     """
     Make an object constrainable with Priors and Transformations.
     TODO: Mappings!!
@@ -330,7 +305,7 @@ class Constrainable(Nameable, Indexable, Observable):
     :func:`constrain()` and :func:`unconstrain()` are main methods here
     """
     def __init__(self, name, default_constraint=None, *a, **kw):
-        super(Constrainable, self).__init__(name=name, *a, **kw)
+        super(Indexable, self).__init__(name=name, *a, **kw)
         self._default_constraint_ = default_constraint
         from index_operations import ParameterIndexOperations
         self.constraints = ParameterIndexOperations()
@@ -352,6 +327,39 @@ class Constrainable(Nameable, Indexable, Observable):
         self._connect_fixes()
         self._notify_parent_change()

+    #===========================================================================
+    # Indexable
+    #===========================================================================
+    def _offset_for(self, param):
+        """
+        Return the offset of the param inside this parameterized object.
+        This does not need to account for shaped parameters, as it
+        basically just sums up the parameter sizes which come before param.
+        """
+        if param.has_parent():
+            if param._parent_._get_original(param) in self.parameters:
+                return self._param_slices_[param._parent_._get_original(param)._parent_index_].start
+            return self._offset_for(param._parent_) + param._parent_._offset_for(param)
+        return 0
+
+    def _raveled_index_for(self, param):
+        """
+        get the raveled index for a param
+        that is an int array, containing the indexes for the flattened
+        param inside this parameterized logic.
+        """
+        from param import ParamConcatenation
+        if isinstance(param, ParamConcatenation):
+            return np.hstack((self._raveled_index_for(p) for p in param.params))
+        return param._raveled_index() + self._offset_for(param)
+
+    def _raveled_index(self):
+        """
+        Flattened array of ints, specifying the index of this object.
+        This has to account for shaped parameters!
+        """
+        return np.r_[:self.size]
+
     #===========================================================================
     # Fixing Parameters:
     #===========================================================================
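Note: the index bookkeeping added above is plain offset arithmetic over the flattened parameter vector. A sketch of what _offset_for and _raveled_index_for compute for a parent holding three flat blocks (block sizes made up for illustration):

    import numpy as np

    sizes = [3, 2, 4]                       # sizes of three child parameters
    offsets = np.cumsum([0] + sizes[:-1])   # [0 3 5]: start of each child, cf. _offset_for
    local = np.arange(sizes[1])             # a child's local _raveled_index(): [0 1]
    print(local + offsets[1])               # [3 4]: its indices in the parent, cf. _raveled_index_for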
@@ -406,9 +414,24 @@ class Constrainable(Nameable, Indexable, Observable):
             self._fixes_ = None
             del self.constraints[__fixed__]

+    #===========================================================================
+    # Convenience for fixed
+    #===========================================================================
     def _has_fixes(self):
         return hasattr(self, "_fixes_") and self._fixes_ is not None and self._fixes_.size == self.size

+    @property
+    def is_fixed(self):
+        for p in self.parameters:
+            if not p.is_fixed: return False
+        return True
+
+    def _get_original(self, param):
+        # if advanced indexing is activated it happens that the array is a copy
+        # you can retrieve the original param through this method, by passing
+        # the copy here
+        return self.parameters[param._parent_index_]
+
     #===========================================================================
     # Prior Operations
     #===========================================================================
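Note: is_fixed recurses, so a handle counts as fixed only when every child does. In miniature (hypothetical Leaf objects):

    class Leaf(object):
        def __init__(self, fixed):
            self.is_fixed = fixed

    params = [Leaf(True), Leaf(True), Leaf(False)]
    print(all(p.is_fixed for p in params))   # False: one free parameter unfixes the handle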
@@ -432,8 +455,7 @@ class Constrainable(Nameable, Indexable, Observable):

     def unset_priors(self, *priors):
         """
-        Un-set all priors given from this parameter handle.
-
+        Un-set all priors given (in *priors) from this parameter handle.
         """
         return self._remove_from_index_operations(self.priors, priors)
@@ -535,7 +557,7 @@ class Constrainable(Nameable, Indexable, Observable):
         self.constraints = ParameterIndexOperationsView(parent.constraints, parent._offset_for(self), self.size)
         self.priors = ParameterIndexOperationsView(parent.priors, parent._offset_for(self), self.size)
         self._fixes_ = None
-        for p in self._parameters_:
+        for p in self.parameters:
             p._parent_changed(parent)

     def _add_to_index_operations(self, which, reconstrained, what, warning):
@@ -563,14 +585,13 @@ class Constrainable(Nameable, Indexable, Observable):
         removed = np.empty((0,), dtype=int)
         for t in transforms:
             unconstrained = which.remove(t, self._raveled_index())
-            print unconstrained
             removed = np.union1d(removed, unconstrained)
             if t is __fixed__:
                 self._highest_parent_._set_unfixed(self, unconstrained)

         return removed

-class OptimizationHandlable(Constrainable):
+class OptimizationHandlable(Indexable):
     """
     This enables optimization handles on an Object as done in GPy 0.4.
@@ -580,7 +601,7 @@ class OptimizationHandlable(Constrainable):
         super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw)

     def _get_params_transformed(self):
-        # transformed parameters (apply transformation rules)
+        # transformed parameters (apply un-transformation rules)
         p = self.param_array.copy()
         [np.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
         if self.has_parent() and self.constraints[__fixed__].size != 0:
@@ -592,6 +613,11 @@ class OptimizationHandlable(Constrainable):
         return p

     def _set_params_transformed(self, p):
+        """
+        Set parameters p, but make sure they get transformed before setting.
+        This means the optimizer sees p, whereas the model sees transformed(p),
+        such that the parameters the model sees are in the right domain.
+        """
         if not(p is self.param_array):
             if self.has_parent() and self.constraints[__fixed__].size != 0:
                 fixes = np.ones(self.size).astype(bool)
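Note: this is what finv in _get_params_transformed and the forward transform in _set_params_transformed do, using a softplus positivity constraint in the style of the Logexp transformation imported above (a sketch, not the GPy class itself):

    import numpy as np

    f    = lambda x: np.log1p(np.exp(x))   # optimiser space -> constrained model space
    finv = lambda p: np.log(np.expm1(p))   # constrained model space -> optimiser space

    p = np.array([0.5, 2.0, 10.0])         # positive model parameters
    x = finv(p)                            # unconstrained values the optimiser works on
    print(np.allclose(f(x), p))            # True: the round trip recovers the parameters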
@@ -604,12 +630,33 @@ class OptimizationHandlable(Constrainable):
             self._trigger_params_changed()

     def _trigger_params_changed(self, trigger_parent=True):
-        [p._trigger_params_changed(trigger_parent=False) for p in self._parameters_]
+        """
+        First tell all children to update,
+        then update yourself.
+
+        If trigger_parent is True, we will tell the parent, otherwise not.
+        """
+        [p._trigger_params_changed(trigger_parent=False) for p in self.parameters]
         self.notify_observers(None, None if trigger_parent else -np.inf)

     def _size_transformed(self):
+        """
+        As fixes are not passed to the optimiser, the size of the model for the optimiser
+        is the size of all parameters minus the size of the fixes.
+        """
         return self.size - self.constraints[__fixed__].size

+    def _transform_gradients(self, g):
+        """
+        Transform the gradients by multiplying the gradient factor for each
+        constraint to it.
+        """
+        if self.has_parent():
+            return g
+        [np.put(g, i, g[i] * c.gradfactor(self.param_array[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+        if self._has_fixes(): return g[self._fixes_]
+        return g
+
     @property
     def num_params(self):
         """
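Note: the gradfactor multiplication in _transform_gradients is the chain rule: a gradient dL/dp with respect to the constrained parameter p = f(x) becomes dL/dx = dL/dp * f'(x). For the softplus sketch above, f'(x) can be written through p as 1 - exp(-p), which is presumably what gradfactor returns for Logexp; checked numerically:

    import numpy as np

    f = lambda x: np.log1p(np.exp(x))        # softplus, as in the previous sketch
    gradfactor = lambda p: 1. - np.exp(-p)   # f'(x) expressed through p = f(x)

    x = np.array([-1.0, 0.3, 2.0])
    p = f(x)
    eps = 1e-6
    numeric = (f(x + eps) - f(x - eps)) / (2 * eps)          # central-difference f'(x)
    print(np.allclose(gradfactor(p), numeric, atol=1e-6))    # True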
@@ -628,8 +675,8 @@ class OptimizationHandlable(Constrainable):
         """
         if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)
         else: adjust = lambda x: x
-        if recursive: names = [xi for x in self._parameters_ for xi in x.parameter_names(add_self=True, adjust_for_printing=adjust_for_printing)]
-        else: names = [adjust(x.name) for x in self._parameters_]
+        if recursive: names = [xi for x in self.parameters for xi in x.parameter_names(add_self=True, adjust_for_printing=adjust_for_printing)]
+        else: names = [adjust(x.name) for x in self.parameters]
         if add_self: names = map(lambda x: adjust(self.name) + "." + x, names)
         return names
@@ -651,7 +698,7 @@ class OptimizationHandlable(Constrainable):
         Randomize the model.
         Make this draw from the prior if one exists, else draw from given random generator

-        :param rand_gen: numpy random number generator which takes args and kwargs
+        :param rand_gen: np random number generator which takes args and kwargs
         :param float loc: loc parameter for random number generator
         :param float scale: scale parameter for random number generator
         :param args, kwargs: will be passed through to random number generator
@@ -667,7 +714,7 @@ class OptimizationHandlable(Constrainable):
     # for all parameterized objects
     #===========================================================================
     @property
-    def full_gradient(self):
+    def gradient_full(self):
         """
         Note to users:
         This does not return the gradient in the right shape! Use self.gradient
@@ -681,26 +728,43 @@ class OptimizationHandlable(Constrainable):
         return self._gradient_array_

     def _propagate_param_grad(self, parray, garray):
+        """
+        For propagating the param_array and gradient_array.
+        This ensures the in memory view of each subsequent array.
+
+        1.) connect param_array of children to self.param_array
+        2.) tell all children to propagate further
+        """
         pi_old_size = 0
-        for pi in self._parameters_:
+        for pi in self.parameters:
             pislice = slice(pi_old_size, pi_old_size + pi.size)

             self.param_array[pislice] = pi.param_array.flat # , requirements=['C', 'W']).flat
-            self.full_gradient[pislice] = pi.full_gradient.flat # , requirements=['C', 'W']).flat
+            self.gradient_full[pislice] = pi.gradient_full.flat # , requirements=['C', 'W']).flat

             pi.param_array.data = parray[pislice].data
-            pi.full_gradient.data = garray[pislice].data
+            pi.gradient_full.data = garray[pislice].data

             pi._propagate_param_grad(parray[pislice], garray[pislice])
             pi_old_size += pi.size

 class Parameterizable(OptimizationHandlable):
+    """
+    A parameterisable class.
+
+    This class provides the parameters list (ArrayList) and standard parameter handling,
+    such as {add|remove}_parameter(), traverse hierarchy and param_array, gradient_array
+    and the empty parameters_changed().
+
+    This class is abstract and should not be instantiated.
+    Use GPy.core.Parameterized() as node (or leaf) in the parameterized hierarchy.
+    Use GPy.core.Param() for a leaf in the parameterized hierarchy.
+    """
     def __init__(self, *args, **kwargs):
         super(Parameterizable, self).__init__(*args, **kwargs)
         from GPy.core.parameterization.lists_and_dicts import ArrayList
-        self._parameters_ = ArrayList()
+        self.parameters = ArrayList()
         self._param_array_ = None
-        self.size = 0
         self._added_names_ = set()
         self.__visited = False # for traversing in reverse order we need to know if we were here already
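Note: _propagate_param_grad rebinds each child's array buffer (the `.data` assignments) so that the child's param_array and gradient_full alias slices of the parent's flat arrays (old numpy permitted this rebinding). The observable effect is ordinary numpy view semantics:

    import numpy as np

    parent = np.zeros(5)
    child = parent[1:4]          # a slice is a view into the parent's buffer
    child[:] = [1., 2., 3.]
    print(parent)                # [0. 1. 2. 3. 0.]: child writes show up in the parent
    parent[2] = -5.
    print(child)                 # [ 1. -5.  3.]: and parent writes show up in the child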
@@ -735,7 +799,7 @@ class Parameterizable(OptimizationHandlable):
         if not self.__visited:
             visit(self, *args, **kwargs)
             self.__visited = True
-            for c in self._parameters_:
+            for c in self.parameters:
                 c.traverse(visit, *args, **kwargs)
             self.__visited = False
@@ -743,9 +807,9 @@ class Parameterizable(OptimizationHandlable):
         """
         Traverse the hierarchy upwards, visiting all parents and their children except self.
         See "visitor pattern" in literature. This is implemented in pre-order fashion.

         Example:

         parents = []
         self.traverse_parents(parents.append)
         print parents
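Note: traverse/traverse_parents implement a pre-order visitor with a __visited guard against revisiting nodes through back references. A standalone sketch of the downward walk (hypothetical Node class):

    class Node(object):
        def __init__(self, name, children=()):
            self.name, self.children, self._visited = name, list(children), False

        def traverse(self, visit, *args, **kwargs):
            if not self._visited:
                visit(self, *args, **kwargs)
                self._visited = True
                for c in self.children:      # pre-order: parent first, then children
                    c.traverse(visit, *args, **kwargs)
                self._visited = False

    root = Node('root', [Node('a', [Node('a.x')]), Node('b')])
    names = []
    root.traverse(lambda n: names.append(n.name))
    print(names)   # ['root', 'a', 'a.x', 'b']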
@@ -754,7 +818,7 @@ class Parameterizable(OptimizationHandlable):
         self.__visited = True
         self._parent_._traverse_parents(visit, *args, **kwargs)
         self.__visited = False

     def _traverse_parents(self, visit, *args, **kwargs):
         if not self.__visited:
             self.__visited = True
@@ -779,7 +843,7 @@ class Parameterizable(OptimizationHandlable):

     @property
     def num_params(self):
-        return len(self._parameters_)
+        return len(self.parameters)

     def _add_parameter_name(self, param, ignore_added_names=False):
         pname = adjust_name_for_printing(param.name)
@@ -812,132 +876,6 @@ class Parameterizable(OptimizationHandlable):
         self._remove_parameter_name(None, old_name)
         self._add_parameter_name(param)

-    def add_parameter(self, param, index=None, _ignore_added_names=False):
-        """
-        :param parameters: the parameters to add
-        :type parameters: list of or one :py:class:`GPy.core.param.Param`
-        :param [index]: index of where to put parameters
-
-        :param bool _ignore_added_names: whether the name of the parameter overrides a possibly existing field
-
-        Add all parameters to this param class, you can insert parameters
-        at any given index using the :func:`list.insert` syntax
-        """
-        if param in self._parameters_ and index is not None:
-            self.remove_parameter(param)
-            self.add_parameter(param, index)
-        # elif param.has_parent():
-        #     raise HierarchyError, "parameter {} already in another model ({}), create new object (or copy) for adding".format(param._short(), param._highest_parent_._short())
-        elif param not in self._parameters_:
-            if param.has_parent():
-                def visit(parent, self):
-                    if parent is self:
-                        raise HierarchyError, "You cannot add a parameter twice into the hierarchy"
-                param.traverse_parents(visit, self)
-                param._parent_.remove_parameter(param)
-            # make sure the size is set
-            if index is None:
-                self.constraints.update(param.constraints, self.size)
-                self.priors.update(param.priors, self.size)
-                self._parameters_.append(param)
-            else:
-                start = sum(p.size for p in self._parameters_[:index])
-                self.constraints.shift_right(start, param.size)
-                self.priors.shift_right(start, param.size)
-                self.constraints.update(param.constraints, start)
-                self.priors.update(param.priors, start)
-                self._parameters_.insert(index, param)
-
-            param.add_observer(self, self._pass_through_notify_observers, -np.inf)
-
-            parent = self
-            while parent is not None:
-                parent.size += param.size
-                parent = parent._parent_
-
-            self._connect_parameters()
-
-            self._highest_parent_._connect_parameters(ignore_added_names=_ignore_added_names)
-            self._highest_parent_._notify_parent_change()
-            self._highest_parent_._connect_fixes()
-
-        else:
-            raise HierarchyError, """Parameter exists already and no copy made"""
-
-    def add_parameters(self, *parameters):
-        """
-        convenience method for adding several
-        parameters without gradient specification
-        """
-        [self.add_parameter(p) for p in parameters]
-
-    def remove_parameter(self, param):
-        """
-        :param param: param object to remove from being a parameter of this parameterized object.
-        """
-        if not param in self._parameters_:
-            raise RuntimeError, "Parameter {} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name)
-
-        start = sum([p.size for p in self._parameters_[:param._parent_index_]])
-        self._remove_parameter_name(param)
-        self.size -= param.size
-        del self._parameters_[param._parent_index_]
-
-        param._disconnect_parent()
-        param.remove_observer(self, self._pass_through_notify_observers)
-        self.constraints.shift_left(start, param.size)
-
-        self._connect_parameters()
-        self._notify_parent_change()
-
-        parent = self._parent_
-        while parent is not None:
-            parent.size -= param.size
-            parent = parent._parent_
-
-        self._highest_parent_._connect_parameters()
-        self._highest_parent_._connect_fixes()
-        self._highest_parent_._notify_parent_change()
-
-    def _connect_parameters(self, ignore_added_names=False):
-        # connect parameterlist to this parameterized object
-        # This just sets up the right connection for the params objects
-        # to be used as parameters
-        # it also sets the constraints for each parameter to the constraints
-        # of their respective parents
-        if not hasattr(self, "_parameters_") or len(self._parameters_) < 1:
-            # no parameters for this class
-            return
-        if self.param_array.size != self.size:
-            self.param_array = np.empty(self.size, dtype=np.float64)
-        if self.gradient.size != self.size:
-            self._gradient_array_ = np.empty(self.size, dtype=np.float64)
-
-        old_size = 0
-        self._param_slices_ = []
-        for i, p in enumerate(self._parameters_):
-            p._parent_ = self
-            p._parent_index_ = i
-
-            pslice = slice(old_size, old_size + p.size)
-            # first connect all children
-            p._propagate_param_grad(self.param_array[pslice], self.full_gradient[pslice])
-            # then connect children to self
-            self.param_array[pslice] = p.param_array.flat # , requirements=['C', 'W']).ravel(order='C')
-            self.full_gradient[pslice] = p.full_gradient.flat # , requirements=['C', 'W']).ravel(order='C')
-
-            if not p.param_array.flags['C_CONTIGUOUS']:
-                raise ValueError, "This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS"
-
-            p.param_array.data = self.param_array[pslice].data
-            p.full_gradient.data = self.full_gradient[pslice].data
-
-            self._param_slices_.append(pslice)
-
-            self._add_parameter_name(p, ignore_added_names=ignore_added_names)
-            old_size += p.size
-
     #===========================================================================
     # notification system
     #===========================================================================
@@ -947,30 +885,13 @@ class Parameterizable(OptimizationHandlable):
         self.notify_observers(which=which)

     #===========================================================================
-    # Pickling
-    #===========================================================================
-    def __setstate__(self, state):
-        super(Parameterizable, self).__setstate__(state)
-        self._connect_parameters()
-        self._connect_fixes()
-        self._notify_parent_change()
-
-        self.parameters_changed()
-
-    def copy(self):
-        c = super(Parameterizable, self).copy()
-        c._connect_parameters()
-        c._connect_fixes()
-        c._notify_parent_change()
-        return c
-
-    #===========================================================================
     # From being parentable, we have to define the parent_change notification
     #===========================================================================
     def _notify_parent_change(self):
         """
         Notify all parameters that the parent has changed
         """
-        for p in self._parameters_:
+        for p in self.parameters:
             p._parent_changed(self)

     def parameters_changed(self):
@@ -3,13 +3,10 @@

 import numpy; np = numpy
-import cPickle
 import itertools
 from re import compile, _pattern_type
 from param import ParamConcatenation
-from parameter_core import Pickleable, Parameterizable, adjust_name_for_printing
-from transformations import __fixed__
-from lists_and_dicts import ArrayList
+from parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing

 class ParametersChangedMeta(type):
     def __call__(self, *args, **kw):
@@ -68,8 +65,7 @@ class Parameterized(Parameterizable):
     def __init__(self, name=None, parameters=[], *a, **kw):
         super(Parameterized, self).__init__(name=name, *a, **kw)
         self._in_init_ = True
-        self._parameters_ = ArrayList()
-        self.size = sum(p.size for p in self._parameters_)
+        self.size = sum(p.size for p in self.parameters)
         self.add_observer(self, self._parameters_changed_notification, -100)
         if not self._has_fixes():
             self._fixes_ = None
@@ -86,7 +82,7 @@ class Parameterized(Parameterizable):
             iamroot=True
             node = pydot.Node(id(self), shape='box', label=self.name)#, color='white')
             G.add_node(node)
-            for child in self._parameters_:
+            for child in self.parameters:
                 child_node = child.build_pydot(G)
                 G.add_edge(pydot.Edge(node, child_node))#, color='white'))
@@ -102,58 +98,133 @@ class Parameterized(Parameterizable):
         return node

     #===========================================================================
-    # Gradient control
+    # Add remove parameters:
     #===========================================================================
-    def _transform_gradients(self, g):
-        if self.has_parent():
-            return g
-        [numpy.put(g, i, g[i] * c.gradfactor(self.param_array[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
-        if self._has_fixes(): return g[self._fixes_]
-        return g
-
-    #===========================================================================
-    # Indexable
-    #===========================================================================
-    def _offset_for(self, param):
-        # get the offset in the parameterized index array for param
-        if param.has_parent():
-            if param._parent_._get_original(param) in self._parameters_:
-                return self._param_slices_[param._parent_._get_original(param)._parent_index_].start
-            return self._offset_for(param._parent_) + param._parent_._offset_for(param)
-        return 0
-
-    def _raveled_index_for(self, param):
-        """
-        get the raveled index for a param
-        that is an int array, containing the indexes for the flattened
-        param inside this parameterized logic.
-        """
-        if isinstance(param, ParamConcatenation):
-            return numpy.hstack((self._raveled_index_for(p) for p in param.params))
-        return param._raveled_index() + self._offset_for(param)
-
-    def _raveled_index(self):
-        """
-        get the raveled index for this object,
-        this is not in the global view of things!
-        """
-        return numpy.r_[:self.size]
-
-    #===========================================================================
-    # Convenience for fixed, tied checking of param:
-    #===========================================================================
-    @property
-    def is_fixed(self):
-        for p in self._parameters_:
-            if not p.is_fixed: return False
-        return True
-
-    def _get_original(self, param):
-        # if advanced indexing is activated it happens that the array is a copy
-        # you can retrieve the original param through this method, by passing
-        # the copy here
-        return self._parameters_[param._parent_index_]
+    def add_parameter(self, param, index=None, _ignore_added_names=False):
+        """
+        :param parameters: the parameters to add
+        :type parameters: list of or one :py:class:`GPy.core.param.Param`
+        :param [index]: index of where to put parameters
+
+        :param bool _ignore_added_names: whether the name of the parameter overrides a possibly existing field
+
+        Add all parameters to this param class, you can insert parameters
+        at any given index using the :func:`list.insert` syntax
+        """
+        if param in self.parameters and index is not None:
+            self.remove_parameter(param)
+            self.add_parameter(param, index)
+        # elif param.has_parent():
+        #     raise HierarchyError, "parameter {} already in another model ({}), create new object (or copy) for adding".format(param._short(), param._highest_parent_._short())
+        elif param not in self.parameters:
+            if param.has_parent():
+                def visit(parent, self):
+                    if parent is self:
+                        raise HierarchyError, "You cannot add a parameter twice into the hierarchy"
+                param.traverse_parents(visit, self)
+                param._parent_.remove_parameter(param)
+            # make sure the size is set
+            if index is None:
+                self.constraints.update(param.constraints, self.size)
+                self.priors.update(param.priors, self.size)
+                self.parameters.append(param)
+            else:
+                start = sum(p.size for p in self.parameters[:index])
+                self.constraints.shift_right(start, param.size)
+                self.priors.shift_right(start, param.size)
+                self.constraints.update(param.constraints, start)
+                self.priors.update(param.priors, start)
+                self.parameters.insert(index, param)
+
+            param.add_observer(self, self._pass_through_notify_observers, -np.inf)
+
+            parent = self
+            while parent is not None:
+                parent.size += param.size
+                parent = parent._parent_
+
+            self._connect_parameters()
+
+            self._highest_parent_._connect_parameters(ignore_added_names=_ignore_added_names)
+            self._highest_parent_._notify_parent_change()
+            self._highest_parent_._connect_fixes()
+
+        else:
+            raise HierarchyError, """Parameter exists already and no copy made"""
+
+    def add_parameters(self, *parameters):
+        """
+        convenience method for adding several
+        parameters without gradient specification
+        """
+        [self.add_parameter(p) for p in parameters]
+
+    def remove_parameter(self, param):
+        """
+        :param param: param object to remove from being a parameter of this parameterized object.
+        """
+        if not param in self.parameters:
+            raise RuntimeError, "Parameter {} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name)
+
+        start = sum([p.size for p in self.parameters[:param._parent_index_]])
+        self._remove_parameter_name(param)
+        self.size -= param.size
+        del self.parameters[param._parent_index_]
+
+        param._disconnect_parent()
+        param.remove_observer(self, self._pass_through_notify_observers)
+        self.constraints.shift_left(start, param.size)
+
+        self._connect_parameters()
+        self._notify_parent_change()
+
+        parent = self._parent_
+        while parent is not None:
+            parent.size -= param.size
+            parent = parent._parent_
+
+        self._highest_parent_._connect_parameters()
+        self._highest_parent_._connect_fixes()
+        self._highest_parent_._notify_parent_change()
+
+    def _connect_parameters(self, ignore_added_names=False):
+        # connect parameterlist to this parameterized object
+        # This just sets up the right connection for the params objects
+        # to be used as parameters
+        # it also sets the constraints for each parameter to the constraints
+        # of their respective parents
+        if not hasattr(self, "parameters") or len(self.parameters) < 1:
+            # no parameters for this class
+            return
+        if self.param_array.size != self.size:
+            self.param_array = np.empty(self.size, dtype=np.float64)
+        if self.gradient.size != self.size:
+            self._gradient_array_ = np.empty(self.size, dtype=np.float64)
+
+        old_size = 0
+        self._param_slices_ = []
+        for i, p in enumerate(self.parameters):
+            p._parent_ = self
+            p._parent_index_ = i
+
+            pslice = slice(old_size, old_size + p.size)
+            # first connect all children
+            p._propagate_param_grad(self.param_array[pslice], self.gradient_full[pslice])
+            # then connect children to self
+            self.param_array[pslice] = p.param_array.flat # , requirements=['C', 'W']).ravel(order='C')
+            self.gradient_full[pslice] = p.gradient_full.flat # , requirements=['C', 'W']).ravel(order='C')
+
+            if not p.param_array.flags['C_CONTIGUOUS']:
+                raise ValueError, "This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS"
+
+            p.param_array.data = self.param_array[pslice].data
+            p.gradient_full.data = self.gradient_full[pslice].data
+
+            self._param_slices_.append(pslice)
+
+            self._add_parameter_name(p, ignore_added_names=ignore_added_names)
+            old_size += p.size

     #===========================================================================
     # Get/set parameters:
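Note: how the relocated add_parameter is meant to be used, going by the Parameterizable docstring earlier in this commit; a hypothetical sketch, with the import path assumed:

    import numpy as np
    from GPy.core.parameterization import Param, Parameterized   # assumed import path

    kern = Parameterized('rbf')
    kern.add_parameter(Param('lengthscale', np.ones(2)))
    kern.add_parameter(Param('variance', np.array([1.])))
    print(kern.parameter_names())   # e.g. ['lengthscale', 'variance']
    print(kern.size)                # 3: two lengthscales plus one variance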
@@ -200,10 +271,28 @@ class Parameterized(Parameterizable):

     def __setattr__(self, name, val):
         # override the default behaviour, if setting a param, so broadcasting can be used
-        if hasattr(self, "_parameters_"):
+        if hasattr(self, "parameters"):
             pnames = self.parameter_names(False, adjust_for_printing=True, recursive=False)
-            if name in pnames: self._parameters_[pnames.index(name)][:] = val; return
+            if name in pnames: self.parameters[pnames.index(name)][:] = val; return
         object.__setattr__(self, name, val);

+    #===========================================================================
+    # Pickling
+    #===========================================================================
+    def __setstate__(self, state):
+        super(Parameterized, self).__setstate__(state)
+        self._connect_parameters()
+        self._connect_fixes()
+        self._notify_parent_change()
+
+        self.parameters_changed()
+
+    def copy(self):
+        c = super(Parameterized, self).copy()
+        c._connect_parameters()
+        c._connect_fixes()
+        c._notify_parent_change()
+        return c
+
     #===========================================================================
     # Printing:
     #===========================================================================
@@ -211,22 +300,22 @@ class Parameterized(Parameterizable):
         return self.hierarchy_name()
     @property
     def flattened_parameters(self):
-        return [xi for x in self._parameters_ for xi in x.flattened_parameters]
+        return [xi for x in self.parameters for xi in x.flattened_parameters]
     @property
     def _parameter_sizes_(self):
-        return [x.size for x in self._parameters_]
+        return [x.size for x in self.parameters]
     @property
     def parameter_shapes(self):
-        return [xi for x in self._parameters_ for xi in x.parameter_shapes]
+        return [xi for x in self.parameters for xi in x.parameter_shapes]
     @property
     def _constraints_str(self):
-        return [cs for p in self._parameters_ for cs in p._constraints_str]
+        return [cs for p in self.parameters for cs in p._constraints_str]
     @property
     def _priors_str(self):
-        return [cs for p in self._parameters_ for cs in p._priors_str]
+        return [cs for p in self.parameters for cs in p._priors_str]
     @property
     def _description_str(self):
-        return [xi for x in self._parameters_ for xi in x._description_str]
+        return [xi for x in self.parameters for xi in x._description_str]
     @property
     def _ties_str(self):
         return [','.join(x._ties_str) for x in self.flattened_parameters]
@@ -246,7 +335,7 @@ class Parameterized(Parameterizable):
         to_print = []
         for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs):
             to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))
-        # to_print = [format_spec.format(p=p, const=c, t=t) if isinstance(p, Param) else p.__str__(header=False) for p, c, t in itertools.izip(self._parameters_, constrs, ts)]
+        # to_print = [format_spec.format(p=p, const=c, t=t) if isinstance(p, Param) else p.__str__(header=False) for p, c, t in itertools.izip(self.parameters, constrs, ts)]
         sep = '-' * (nl + sl + cl + + pl + tl + 8 * 2 + 3)
         if header:
             header = " {{0:<{0}s}} | {{1:^{1}s}} | {{2:^{2}s}} | {{3:^{3}s}} | {{4:^{4}s}}".format(nl, sl, cl, pl, tl).format(name, "Value", "Constraint", "Prior", "Tied to")
@@ -81,7 +81,7 @@ class VariationalPosterior(Parameterized):
     def _raveled_index(self):
         index = np.empty(dtype=int, shape=0)
         size = 0
-        for p in self._parameters_:
+        for p in self.parameters:
             index = np.hstack((index, p._raveled_index()+size))
             size += p._realsize_ if hasattr(p, '_realsize_') else p.size
         return index
@ -96,10 +96,10 @@ class VariationalPosterior(Parameterized):
|
||||||
dc = self.__dict__.copy()
|
dc = self.__dict__.copy()
|
||||||
dc['mean'] = self.mean[s]
|
dc['mean'] = self.mean[s]
|
||||||
dc['variance'] = self.variance[s]
|
dc['variance'] = self.variance[s]
|
||||||
dc['_parameters_'] = copy.copy(self._parameters_)
|
dc['parameters'] = copy.copy(self.parameters)
|
||||||
n.__dict__.update(dc)
|
n.__dict__.update(dc)
|
||||||
n._parameters_[dc['mean']._parent_index_] = dc['mean']
|
n.parameters[dc['mean']._parent_index_] = dc['mean']
|
||||||
n._parameters_[dc['variance']._parent_index_] = dc['variance']
|
n.parameters[dc['variance']._parent_index_] = dc['variance']
|
||||||
n._gradient_array_ = None
|
n._gradient_array_ = None
|
||||||
oversize = self.size - self.mean.size - self.variance.size
|
oversize = self.size - self.mean.size - self.variance.size
|
||||||
n.size = n.mean.size + n.variance.size + oversize
|
n.size = n.mean.size + n.variance.size + oversize
|
||||||
@@ -150,11 +150,11 @@ class SpikeAndSlabPosterior(VariationalPosterior):
         dc['mean'] = self.mean[s]
         dc['variance'] = self.variance[s]
         dc['binary_prob'] = self.binary_prob[s]
-        dc['_parameters_'] = copy.copy(self._parameters_)
+        dc['parameters'] = copy.copy(self.parameters)
         n.__dict__.update(dc)
-        n._parameters_[dc['mean']._parent_index_] = dc['mean']
-        n._parameters_[dc['variance']._parent_index_] = dc['variance']
-        n._parameters_[dc['binary_prob']._parent_index_] = dc['binary_prob']
+        n.parameters[dc['mean']._parent_index_] = dc['mean']
+        n.parameters[dc['variance']._parent_index_] = dc['variance']
+        n.parameters[dc['binary_prob']._parent_index_] = dc['binary_prob']
         n.ndim = n.mean.ndim
         n.shape = n.mean.shape
         n.num_data = n.mean.shape[0]
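Both __getitem__ implementations copy the instance dict, replace the sliced
children (mean, variance, binary_prob), and then overwrite the same slots in
a shallow copy of the parameters list via each child's _parent_index_. The
index bookkeeping, sketched with stand-in values rather than GPy objects:

import copy

parameters = ['mean', 'variance', 'binary_prob']  # parent's children, in order
mean_parent_index = 0                             # what _parent_index_ records

new_parameters = copy.copy(parameters)            # shallow copy, as in the diff
new_parameters[mean_parent_index] = 'mean[s]'     # swap in the sliced child
print(new_parameters)  # -> ['mean[s]', 'variance', 'binary_prob']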
@@ -141,10 +141,10 @@ class Add(CombinationKernel):
         from static import White, Bias
         target_mu = np.zeros(variational_posterior.shape)
         target_S = np.zeros(variational_posterior.shape)
-        for p1 in self._parameters_:
+        for p1 in self.parameters:
             #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
             eff_dL_dpsi1 = dL_dpsi1.copy()
-            for p2 in self._parameters_:
+            for p2 in self.parameters:
                 if p2 is p1:
                     continue
                 if isinstance(p2, White):
@@ -160,7 +160,7 @@ class Add(CombinationKernel):
     def add(self, other, name='sum'):
         if isinstance(other, Add):
-            other_params = other._parameters_[:]
+            other_params = other.parameters[:]
             for p in other_params:
                 other.remove_parameter(p)
             self.add_parameters(*other_params)
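Add.add flattens nested sums: when the other kernel is itself an Add, its
parts are detached and re-added to self, so the sum never nests. A standalone
sketch of the idea (simplified; GPy's remove_parameter/add_parameters do more
bookkeeping than a plain list):

class Add(object):
    def __init__(self, parts):
        self.parameters = list(parts)
    def add(self, other):
        if isinstance(other, Add):
            # move the children across instead of nesting Add inside Add
            self.parameters.extend(other.parameters)
            other.parameters = []
        else:
            self.parameters.append(other)
        return self

s = Add(['rbf', 'white']).add(Add(['linear', 'bias']))
print(s.parameters)  # -> ['rbf', 'white', 'linear', 'bias']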
@@ -183,9 +183,9 @@ class Kern(Parameterized):
         assert isinstance(other, Kern), "only kernels can be added to kernels..."
         from prod import Prod
         #kernels = []
-        #if isinstance(self, Prod): kernels.extend(self._parameters_)
+        #if isinstance(self, Prod): kernels.extend(self.parameters)
         #else: kernels.append(self)
-        #if isinstance(other, Prod): kernels.extend(other._parameters_)
+        #if isinstance(other, Prod): kernels.extend(other.parameters)
         #else: kernels.append(other)
         return Prod([self, other], name)
@@ -222,7 +222,7 @@ class CombinationKernel(Kern):
     @property
     def parts(self):
-        return self._parameters_
+        return self.parameters

     def get_input_dim_active_dims(self, kernels, extra_dims = None):
         #active_dims = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels), np.array([], dtype=int))
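After the rename, parts is nothing more than a read-only alias for the public
parameters list. A sketch of the alias pattern (hypothetical minimal class,
not GPy's CombinationKernel):

class CombinationKernel(object):
    def __init__(self, kernels):
        self.parameters = list(kernels)
    @property
    def parts(self):
        return self.parameters  # alias kept for the kernel-combination API

ck = CombinationKernel(['rbf', 'linear'])
print(ck.parts)  # -> ['rbf', 'linear']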
@@ -68,7 +68,7 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False):
     for i in range(ard_params.shape[0]):
         c = Tango.nextMedium()
-        bars.append(plot_bars(fig, ax, x, ard_params[i,:], c, kernel._parameters_[i].name, bottom=bottom))
+        bars.append(plot_bars(fig, ax, x, ard_params[i,:], c, kernel.parameters[i].name, bottom=bottom))
         bottom += ard_params[i,:]

     ax.set_xlim(-.5, kernel.input_dim - .5)
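plot_ARD stacks one bar series per child kernel and labels each series with
kernel.parameters[i].name. A rough equivalent using plain matplotlib (Tango
and plot_bars are GPy helpers; this sketch assumes a 2-D ard_params array and
a list of names instead of a kernel object):

import numpy as np
import matplotlib.pyplot as plt

ard_params = np.array([[1.0, 0.2, 0.5],    # one row per child kernel,
                       [0.3, 0.9, 0.1]])   # one column per input dimension
names = ['rbf', 'linear']

fig, ax = plt.subplots()
x = np.arange(ard_params.shape[1])
bottom = np.zeros(ard_params.shape[1])
for i in range(ard_params.shape[0]):
    ax.bar(x, ard_params[i, :], bottom=bottom, label=names[i])
    bottom += ard_params[i, :]       # stack the next series on top
ax.set_xlim(-.5, ard_params.shape[1] - .5)
ax.legend()
plt.show()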
@@ -95,7 +95,7 @@ class ParameterizedTest(unittest.TestCase):
         self.assertListEqual(self.test1.kern.param_array.tolist(), val[:2].tolist())

     def test_add_parameter_already_in_hirarchy(self):
-        self.assertRaises(HierarchyError, self.test1.add_parameter, self.white._parameters_[0])
+        self.assertRaises(HierarchyError, self.test1.add_parameter, self.white.parameters[0])

     def test_default_constraints(self):
         self.assertIs(self.rbf.variance.constraints._param_index_ops, self.rbf.constraints._param_index_ops)
@@ -89,28 +89,28 @@ class Test(ListDictTestCase):
         self.assertIs(pcopy.constraints, pcopy.rbf.lengthscale.constraints._param_index_ops)
         self.assertIs(pcopy.constraints, pcopy.linear.constraints._param_index_ops)
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        self.assertListEqual(par.full_gradient.tolist(), pcopy.full_gradient.tolist())
+        self.assertListEqual(par.gradient_full.tolist(), pcopy.gradient_full.tolist())
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assertIsNot(par.param_array, pcopy.param_array)
-        self.assertIsNot(par.full_gradient, pcopy.full_gradient)
+        self.assertIsNot(par.gradient_full, pcopy.gradient_full)
         with tempfile.TemporaryFile('w+b') as f:
             par.pickle(f)
             f.seek(0)
             pcopy = pickle.load(f)
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
         pcopy.gradient = 10
-        np.testing.assert_allclose(par.linear.full_gradient, pcopy.linear.full_gradient)
-        np.testing.assert_allclose(pcopy.linear.full_gradient, 10)
+        np.testing.assert_allclose(par.linear.gradient_full, pcopy.linear.gradient_full)
+        np.testing.assert_allclose(pcopy.linear.gradient_full, 10)
         self.assertSequenceEqual(str(par), str(pcopy))

     def test_model(self):
         par = toy_rbf_1d_50(optimize=0, plot=0)
         pcopy = par.copy()
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        self.assertListEqual(par.full_gradient.tolist(), pcopy.full_gradient.tolist())
+        self.assertListEqual(par.gradient_full.tolist(), pcopy.gradient_full.tolist())
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assertIsNot(par.param_array, pcopy.param_array)
-        self.assertIsNot(par.full_gradient, pcopy.full_gradient)
+        self.assertIsNot(par.gradient_full, pcopy.gradient_full)
         self.assertTrue(pcopy.checkgrad())
         self.assert_(np.any(pcopy.gradient!=0.0))
         with tempfile.TemporaryFile('w+b') as f:
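These tests all assert the same invariants: a copy (or a pickle round trip)
preserves param_array and the renamed gradient_full, but must not share the
underlying arrays with the original. The pattern, written against a
hypothetical model object m using the copy/pickle calls shown in the diff:

import pickle, tempfile
import numpy as np

def check_copy_invariants(m):
    mcopy = m.copy()
    # same values...
    np.testing.assert_allclose(m.param_array, mcopy.param_array)
    np.testing.assert_allclose(m.gradient_full, mcopy.gradient_full)  # was: full_gradient
    # ...but independent storage
    assert m.param_array is not mcopy.param_array
    assert m.gradient_full is not mcopy.gradient_full
    # pickling through a temporary file, as in the tests above
    with tempfile.TemporaryFile('w+b') as f:
        m.pickle(f)
        f.seek(0)
        mcopy = pickle.load(f)
    np.testing.assert_allclose(m.param_array, mcopy.param_array)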
@@ -118,7 +118,7 @@ class Test(ListDictTestCase):
             f.seek(0)
             pcopy = pickle.load(f)
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        np.testing.assert_allclose(par.full_gradient, pcopy.full_gradient)
+        np.testing.assert_allclose(par.gradient_full, pcopy.gradient_full)
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assert_(pcopy.checkgrad())
@@ -126,10 +126,10 @@ class Test(ListDictTestCase):
         par = toy_rbf_1d_50(optimize=0, plot=0)
         pcopy = GPRegression(par.X.copy(), par.Y.copy(), kernel=par.kern.copy())
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        self.assertListEqual(par.full_gradient.tolist(), pcopy.full_gradient.tolist())
+        self.assertListEqual(par.gradient_full.tolist(), pcopy.gradient_full.tolist())
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assertIsNot(par.param_array, pcopy.param_array)
-        self.assertIsNot(par.full_gradient, pcopy.full_gradient)
+        self.assertIsNot(par.gradient_full, pcopy.gradient_full)
         self.assertTrue(pcopy.checkgrad())
         self.assert_(np.any(pcopy.gradient!=0.0))
         pcopy.optimize('bfgs')
@@ -140,7 +140,7 @@ class Test(ListDictTestCase):
             f.seek(0)
             pcopy = pickle.load(f)
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        np.testing.assert_allclose(par.full_gradient, pcopy.full_gradient)
+        np.testing.assert_allclose(par.gradient_full, pcopy.gradient_full)
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assert_(pcopy.checkgrad())
@@ -151,18 +151,18 @@ class Test(ListDictTestCase):
         par.gradient = 10
         pcopy = par.copy()
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        self.assertListEqual(par.full_gradient.tolist(), pcopy.full_gradient.tolist())
+        self.assertListEqual(par.gradient_full.tolist(), pcopy.gradient_full.tolist())
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assertIsNot(par.param_array, pcopy.param_array)
-        self.assertIsNot(par.full_gradient, pcopy.full_gradient)
+        self.assertIsNot(par.gradient_full, pcopy.gradient_full)
         with tempfile.TemporaryFile('w+b') as f:
             par.pickle(f)
             f.seek(0)
             pcopy = pickle.load(f)
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
         pcopy.gradient = 10
-        np.testing.assert_allclose(par.full_gradient, pcopy.full_gradient)
-        np.testing.assert_allclose(pcopy.mean.full_gradient, 10)
+        np.testing.assert_allclose(par.gradient_full, pcopy.gradient_full)
+        np.testing.assert_allclose(pcopy.mean.gradient_full, 10)
         self.assertSequenceEqual(str(par), str(pcopy))

     def test_model_concat(self):
@@ -170,10 +170,10 @@ class Test(ListDictTestCase):
         par.randomize()
         pcopy = par.copy()
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        self.assertListEqual(par.full_gradient.tolist(), pcopy.full_gradient.tolist())
+        self.assertListEqual(par.gradient_full.tolist(), pcopy.gradient_full.tolist())
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assertIsNot(par.param_array, pcopy.param_array)
-        self.assertIsNot(par.full_gradient, pcopy.full_gradient)
+        self.assertIsNot(par.gradient_full, pcopy.gradient_full)
         self.assertTrue(pcopy.checkgrad())
         self.assert_(np.any(pcopy.gradient!=0.0))
         with tempfile.TemporaryFile('w+b') as f:
@@ -181,7 +181,7 @@ class Test(ListDictTestCase):
             f.seek(0)
             pcopy = pickle.load(f)
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
-        np.testing.assert_allclose(par.full_gradient, pcopy.full_gradient)
+        np.testing.assert_allclose(par.gradient_full, pcopy.gradient_full)
         self.assertSequenceEqual(str(par), str(pcopy))
         self.assert_(pcopy.checkgrad())